Feb 16 11:06:49 crc systemd[1]: Starting Kubernetes Kubelet...
Feb 16 11:06:49 crc restorecon[4684]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by
admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Feb 16 11:06:49 crc restorecon[4684]: 
/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Feb 16 11:06:49 crc restorecon[4684]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Feb 16 11:06:49 crc restorecon[4684]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Feb 16 11:06:49 crc restorecon[4684]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized 
by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c377,c642 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 16 11:06:49 crc restorecon[4684]: 
/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 16 11:06:49 crc restorecon[4684]: 
/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c0,c25 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Feb 16 11:06:49 crc restorecon[4684]: 
/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 16 11:06:49 crc restorecon[4684]: 
/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 16 11:06:49 crc restorecon[4684]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 16 11:06:49 crc restorecon[4684]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c336,c787 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Feb 16 11:06:49 crc restorecon[4684]: 
/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to
system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 
11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 16 11:06:49 crc 
restorecon[4684]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 16 11:06:49 crc restorecon[4684]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Feb 16 11:06:49 crc restorecon[4684]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Feb 16 11:06:49 crc restorecon[4684]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 16 11:06:49 crc restorecon[4684]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 
11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:49 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]:
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0
Feb 16 11:06:50 crc restorecon[4684]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0
Feb 16 11:06:50 crc restorecon[4684]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0
Feb 16 11:06:50 crc kubenswrapper[4949]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Feb 16 11:06:50 crc kubenswrapper[4949]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.
Feb 16 11:06:50 crc kubenswrapper[4949]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Feb 16 11:06:50 crc kubenswrapper[4949]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Feb 16 11:06:50 crc kubenswrapper[4949]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI. Feb 16 11:06:50 crc kubenswrapper[4949]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.919931 4949 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime" Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928351 4949 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928385 4949 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928394 4949 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928403 4949 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928412 4949 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928421 4949 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928429 4949 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928451 4949 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928462 4949 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928473 4949 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928483 4949 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928492 4949 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928501 4949 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928509 4949 feature_gate.go:330] unrecognized feature gate: NewOLM Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928517 4949 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928526 4949 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928534 4949 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928541 4949 feature_gate.go:330] unrecognized feature gate: GatewayAPI Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928549 4949 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928558 4949 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928565 4949 feature_gate.go:330] unrecognized feature gate: 
MultiArchInstallAzure Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928573 4949 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928582 4949 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928589 4949 feature_gate.go:330] unrecognized feature gate: Example Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928598 4949 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928606 4949 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928614 4949 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928621 4949 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928632 4949 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928643 4949 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928651 4949 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928659 4949 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928667 4949 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928675 4949 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928682 4949 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928690 4949 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928700 4949 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928710 4949 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928719 4949 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928730 4949 feature_gate.go:330] unrecognized feature gate: InsightsConfig Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928740 4949 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928748 4949 feature_gate.go:330] unrecognized feature gate: SignatureStores Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928758 4949 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928766 4949 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928773 4949 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928781 4949 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928789 4949 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928796 4949 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928804 4949 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928811 4949 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928819 4949 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928827 4949 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928834 4949 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928842 4949 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928849 4949 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928857 4949 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928867 4949 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
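[Editor's note] The long runs of "unrecognized feature gate" warnings come from the wrapper handing the full OpenShift gate set to a plain kubelet feature-gate parser (feature_gate.go:330), and the parse runs more than once during startup, so the same gates repeat below with different microsecond timestamps. A small sketch to reduce a captured journal to the distinct unknown gates and how often each was reported (the script name and I/O convention are illustrative):

    # Sketch: count distinct "unrecognized feature gate" warnings from a journal dump.
    import re
    import sys
    from collections import Counter

    # Matches the warning emitted at feature_gate.go:330.
    PATTERN = re.compile(r"unrecognized feature gate: (\S+)")

    def unknown_gates(log_text: str) -> Counter:
        """Count how often each unrecognized gate name appears."""
        return Counter(PATTERN.findall(log_text))

    if __name__ == "__main__":
        for gate, n in sorted(unknown_gates(sys.stdin.read()).items()):
            print(f"{gate}\t{n}")

Feeding it the output of journalctl -u kubelet for this boot would show each gate reported several times, once per parse pass, which is exactly the repetition visible in the lines that follow.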
Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928876 4949 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928883 4949 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928891 4949 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928898 4949 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928909 4949 feature_gate.go:330] unrecognized feature gate: OVNObservability Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928917 4949 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928924 4949 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928932 4949 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928939 4949 feature_gate.go:330] unrecognized feature gate: PinnedImages Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928947 4949 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928955 4949 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928965 4949 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928975 4949 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.928984 4949 feature_gate.go:330] unrecognized feature gate: PlatformOperators Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931337 4949 flags.go:64] FLAG: --address="0.0.0.0" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931369 4949 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931385 4949 flags.go:64] FLAG: --anonymous-auth="true" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931399 4949 flags.go:64] FLAG: --application-metrics-count-limit="100" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931410 4949 flags.go:64] FLAG: --authentication-token-webhook="false" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931419 4949 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931432 4949 flags.go:64] FLAG: --authorization-mode="AlwaysAllow" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931446 4949 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931458 4949 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931469 4949 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931481 4949 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931494 4949 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931505 4949 
flags.go:64] FLAG: --cgroup-driver="cgroupfs" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931552 4949 flags.go:64] FLAG: --cgroup-root="" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931562 4949 flags.go:64] FLAG: --cgroups-per-qos="true" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931573 4949 flags.go:64] FLAG: --client-ca-file="" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931582 4949 flags.go:64] FLAG: --cloud-config="" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931593 4949 flags.go:64] FLAG: --cloud-provider="" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931601 4949 flags.go:64] FLAG: --cluster-dns="[]" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931612 4949 flags.go:64] FLAG: --cluster-domain="" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931621 4949 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931630 4949 flags.go:64] FLAG: --config-dir="" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931639 4949 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931648 4949 flags.go:64] FLAG: --container-log-max-files="5" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931660 4949 flags.go:64] FLAG: --container-log-max-size="10Mi" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931669 4949 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931678 4949 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931689 4949 flags.go:64] FLAG: --containerd-namespace="k8s.io" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931698 4949 flags.go:64] FLAG: --contention-profiling="false" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931707 4949 flags.go:64] FLAG: --cpu-cfs-quota="true" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931716 4949 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931725 4949 flags.go:64] FLAG: --cpu-manager-policy="none" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931735 4949 flags.go:64] FLAG: --cpu-manager-policy-options="" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931746 4949 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931755 4949 flags.go:64] FLAG: --enable-controller-attach-detach="true" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931763 4949 flags.go:64] FLAG: --enable-debugging-handlers="true" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931772 4949 flags.go:64] FLAG: --enable-load-reader="false" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931783 4949 flags.go:64] FLAG: --enable-server="true" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931792 4949 flags.go:64] FLAG: --enforce-node-allocatable="[pods]" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931804 4949 flags.go:64] FLAG: --event-burst="100" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931814 4949 flags.go:64] FLAG: --event-qps="50" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931823 4949 flags.go:64] FLAG: --event-storage-age-limit="default=0" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931832 4949 flags.go:64] FLAG: 
--event-storage-event-limit="default=0" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931841 4949 flags.go:64] FLAG: --eviction-hard="" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931852 4949 flags.go:64] FLAG: --eviction-max-pod-grace-period="0" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931860 4949 flags.go:64] FLAG: --eviction-minimum-reclaim="" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931869 4949 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931878 4949 flags.go:64] FLAG: --eviction-soft="" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931887 4949 flags.go:64] FLAG: --eviction-soft-grace-period="" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931896 4949 flags.go:64] FLAG: --exit-on-lock-contention="false" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931905 4949 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931914 4949 flags.go:64] FLAG: --experimental-mounter-path="" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931922 4949 flags.go:64] FLAG: --fail-cgroupv1="false" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931932 4949 flags.go:64] FLAG: --fail-swap-on="true" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931940 4949 flags.go:64] FLAG: --feature-gates="" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931951 4949 flags.go:64] FLAG: --file-check-frequency="20s" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931961 4949 flags.go:64] FLAG: --global-housekeeping-interval="1m0s" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931970 4949 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931980 4949 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931989 4949 flags.go:64] FLAG: --healthz-port="10248" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.931999 4949 flags.go:64] FLAG: --help="false" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932008 4949 flags.go:64] FLAG: --hostname-override="" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932017 4949 flags.go:64] FLAG: --housekeeping-interval="10s" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932026 4949 flags.go:64] FLAG: --http-check-frequency="20s" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932036 4949 flags.go:64] FLAG: --image-credential-provider-bin-dir="" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932045 4949 flags.go:64] FLAG: --image-credential-provider-config="" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932053 4949 flags.go:64] FLAG: --image-gc-high-threshold="85" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932062 4949 flags.go:64] FLAG: --image-gc-low-threshold="80" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932071 4949 flags.go:64] FLAG: --image-service-endpoint="" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932080 4949 flags.go:64] FLAG: --kernel-memcg-notification="false" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932088 4949 flags.go:64] FLAG: --kube-api-burst="100" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932097 4949 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932107 4949 flags.go:64] 
FLAG: --kube-api-qps="50" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932116 4949 flags.go:64] FLAG: --kube-reserved="" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932126 4949 flags.go:64] FLAG: --kube-reserved-cgroup="" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932134 4949 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932144 4949 flags.go:64] FLAG: --kubelet-cgroups="" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932153 4949 flags.go:64] FLAG: --local-storage-capacity-isolation="true" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932161 4949 flags.go:64] FLAG: --lock-file="" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932196 4949 flags.go:64] FLAG: --log-cadvisor-usage="false" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932206 4949 flags.go:64] FLAG: --log-flush-frequency="5s" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932215 4949 flags.go:64] FLAG: --log-json-info-buffer-size="0" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932229 4949 flags.go:64] FLAG: --log-json-split-stream="false" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932238 4949 flags.go:64] FLAG: --log-text-info-buffer-size="0" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932246 4949 flags.go:64] FLAG: --log-text-split-stream="false" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932255 4949 flags.go:64] FLAG: --logging-format="text" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932264 4949 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932274 4949 flags.go:64] FLAG: --make-iptables-util-chains="true" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932284 4949 flags.go:64] FLAG: --manifest-url="" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932292 4949 flags.go:64] FLAG: --manifest-url-header="" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932304 4949 flags.go:64] FLAG: --max-housekeeping-interval="15s" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932313 4949 flags.go:64] FLAG: --max-open-files="1000000" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932324 4949 flags.go:64] FLAG: --max-pods="110" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932333 4949 flags.go:64] FLAG: --maximum-dead-containers="-1" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932342 4949 flags.go:64] FLAG: --maximum-dead-containers-per-container="1" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932351 4949 flags.go:64] FLAG: --memory-manager-policy="None" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932360 4949 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932369 4949 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932378 4949 flags.go:64] FLAG: --node-ip="192.168.126.11" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932387 4949 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932409 4949 flags.go:64] FLAG: --node-status-max-images="50" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932418 4949 flags.go:64] FLAG: --node-status-update-frequency="10s" Feb 16 11:06:50 crc 
kubenswrapper[4949]: I0216 11:06:50.932427 4949 flags.go:64] FLAG: --oom-score-adj="-999" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932437 4949 flags.go:64] FLAG: --pod-cidr="" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932448 4949 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932467 4949 flags.go:64] FLAG: --pod-manifest-path="" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932478 4949 flags.go:64] FLAG: --pod-max-pids="-1" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932491 4949 flags.go:64] FLAG: --pods-per-core="0" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932502 4949 flags.go:64] FLAG: --port="10250" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932514 4949 flags.go:64] FLAG: --protect-kernel-defaults="false" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932526 4949 flags.go:64] FLAG: --provider-id="" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932536 4949 flags.go:64] FLAG: --qos-reserved="" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932547 4949 flags.go:64] FLAG: --read-only-port="10255" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932558 4949 flags.go:64] FLAG: --register-node="true" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932571 4949 flags.go:64] FLAG: --register-schedulable="true" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932582 4949 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932601 4949 flags.go:64] FLAG: --registry-burst="10" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932613 4949 flags.go:64] FLAG: --registry-qps="5" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932624 4949 flags.go:64] FLAG: --reserved-cpus="" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932633 4949 flags.go:64] FLAG: --reserved-memory="" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932645 4949 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932654 4949 flags.go:64] FLAG: --root-dir="/var/lib/kubelet" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932663 4949 flags.go:64] FLAG: --rotate-certificates="false" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932672 4949 flags.go:64] FLAG: --rotate-server-certificates="false" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932682 4949 flags.go:64] FLAG: --runonce="false" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932691 4949 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932700 4949 flags.go:64] FLAG: --runtime-request-timeout="2m0s" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932710 4949 flags.go:64] FLAG: --seccomp-default="false" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932718 4949 flags.go:64] FLAG: --serialize-image-pulls="true" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932727 4949 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932737 4949 flags.go:64] FLAG: --storage-driver-db="cadvisor" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932746 4949 flags.go:64] FLAG: --storage-driver-host="localhost:8086" Feb 16 
11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932756 4949 flags.go:64] FLAG: --storage-driver-password="root" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932765 4949 flags.go:64] FLAG: --storage-driver-secure="false" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932773 4949 flags.go:64] FLAG: --storage-driver-table="stats" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932782 4949 flags.go:64] FLAG: --storage-driver-user="root" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932791 4949 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932801 4949 flags.go:64] FLAG: --sync-frequency="1m0s" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932810 4949 flags.go:64] FLAG: --system-cgroups="" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932818 4949 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932832 4949 flags.go:64] FLAG: --system-reserved-cgroup="" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932841 4949 flags.go:64] FLAG: --tls-cert-file="" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932850 4949 flags.go:64] FLAG: --tls-cipher-suites="[]" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932861 4949 flags.go:64] FLAG: --tls-min-version="" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932870 4949 flags.go:64] FLAG: --tls-private-key-file="" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932886 4949 flags.go:64] FLAG: --topology-manager-policy="none" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932894 4949 flags.go:64] FLAG: --topology-manager-policy-options="" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932904 4949 flags.go:64] FLAG: --topology-manager-scope="container" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932913 4949 flags.go:64] FLAG: --v="2" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932924 4949 flags.go:64] FLAG: --version="false" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932936 4949 flags.go:64] FLAG: --vmodule="" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932946 4949 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.932956 4949 flags.go:64] FLAG: --volume-stats-agg-period="1m0s" Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933154 4949 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933200 4949 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933209 4949 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933217 4949 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933226 4949 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933234 4949 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933243 4949 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933251 4949 feature_gate.go:330] unrecognized feature gate: 
MetricsCollectionProfiles Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933260 4949 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933269 4949 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933277 4949 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933286 4949 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933294 4949 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933302 4949 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933311 4949 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933319 4949 feature_gate.go:330] unrecognized feature gate: Example Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933328 4949 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933336 4949 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933344 4949 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933352 4949 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933361 4949 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933371 4949 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933384 4949 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
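[Editor's note] The flags.go:64 "FLAG:" dump above records every kubelet flag with its effective (default or overridden) value, one entry per flag. When diffing a node's flags against defaults it helps to fold those entries into a mapping; a minimal sketch:

    # Sketch: collect the kubelet FLAG dump into {flag: value}.
    import re

    # Matches entries like: flags.go:64] FLAG: --max-pods="110"
    FLAG_RE = re.compile(r'FLAG: (--[\w.-]+)="([^"]*)"')

    def parse_flags(log_text: str) -> dict[str, str]:
        """Return the flag dump as a dict of flag name to quoted value."""
        return dict(FLAG_RE.findall(log_text))

    sample = 'I0216 11:06:50.932333 4949 flags.go:64] FLAG: --max-pods="110"'
    assert parse_flags(sample) == {"--max-pods": "110"}

Applied to this boot it would surface the notable non-defaults seen above: --config=/etc/kubernetes/kubelet.conf, --container-runtime-endpoint=/var/run/crio/crio.sock, --node-ip=192.168.126.11, the control-plane node labels, and the master NoSchedule taint.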
Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933396 4949 feature_gate.go:330] unrecognized feature gate: NewOLM Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933408 4949 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933419 4949 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933429 4949 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933438 4949 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933451 4949 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933461 4949 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933471 4949 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933480 4949 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933490 4949 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933500 4949 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933508 4949 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933516 4949 feature_gate.go:330] unrecognized feature gate: SignatureStores Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933524 4949 feature_gate.go:330] unrecognized feature gate: PinnedImages Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933531 4949 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933539 4949 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933547 4949 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933555 4949 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933563 4949 feature_gate.go:330] unrecognized feature gate: InsightsConfig Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933571 4949 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933581 4949 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933592 4949 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933601 4949 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933611 4949 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933621 4949 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933632 4949 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933640 4949 feature_gate.go:330] unrecognized feature gate: GatewayAPI Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933649 4949 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933657 4949 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933664 4949 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933672 4949 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933682 4949 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933692 4949 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933701 4949 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933709 4949 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933717 4949 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933726 4949 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933734 4949 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933742 4949 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933750 4949 feature_gate.go:330] unrecognized feature gate: OVNObservability Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933758 4949 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933766 4949 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933775 4949 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933783 4949 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933791 4949 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933799 4949 feature_gate.go:330] unrecognized feature gate: PlatformOperators Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933807 4949 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.933815 4949 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.933839 4949 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true 
MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.950048 4949 server.go:491] "Kubelet version" kubeletVersion="v1.31.5" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.950120 4949 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950321 4949 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950344 4949 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950356 4949 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950367 4949 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950377 4949 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950387 4949 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950397 4949 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950409 4949 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950424 4949 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
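[Editor's note] After each parse pass the effective gate set is summarized at feature_gate.go:386 as a Go map literal, "feature gates: {map[Name:bool ...]}", as in the entry just above. A sketch to turn that summary line into a Python dict for comparison across boots or nodes:

    # Sketch: parse kubelet's "feature gates: {map[...]}" summary line.
    import re

    def parse_gate_map(line: str) -> dict[str, bool]:
        """Parse a feature_gate.go:386 summary into {gate: enabled}."""
        inner = re.search(r"\{map\[(.*)\]\}", line)
        if not inner:
            return {}
        pairs = (item.split(":") for item in inner.group(1).split())
        return {name: value == "true" for name, value in pairs}

    sample = "feature gates: {map[CloudDualStackNodeIPs:true KMSv1:true NodeSwap:false]}"
    assert parse_gate_map(sample) == {
        "CloudDualStackNodeIPs": True,
        "KMSv1": True,
        "NodeSwap": False,
    }

For this boot the parsed map is identical on every pass (CloudDualStackNodeIPs, DisableKubeletCloudCredentialProviders, KMSv1 and ValidatingAdmissionPolicy true; the rest false), confirming the repeated warnings are re-parses of the same configuration rather than changing state.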
Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950441 4949 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950453 4949 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950465 4949 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950477 4949 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950488 4949 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950499 4949 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950510 4949 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950521 4949 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950533 4949 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950545 4949 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950556 4949 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950567 4949 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950579 4949 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950590 4949 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950601 4949 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950612 4949 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950626 4949 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950639 4949 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950650 4949 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950661 4949 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950671 4949 feature_gate.go:330] unrecognized feature gate: Example Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950681 4949 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950690 4949 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950700 4949 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950711 4949 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950724 4949 feature_gate.go:330] unrecognized feature gate: GatewayAPI Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950734 4949 feature_gate.go:330] unrecognized feature gate: SignatureStores Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950744 4949 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950754 4949 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950769 4949 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950782 4949 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950793 4949 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950803 4949 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950815 4949 feature_gate.go:330] unrecognized feature gate: NewOLM Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950827 4949 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950838 4949 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950848 4949 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950859 4949 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950870 4949 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950883 4949 feature_gate.go:330] unrecognized feature gate: OVNObservability Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950894 4949 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950906 4949 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950917 4949 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950928 4949 feature_gate.go:330] unrecognized feature gate: PlatformOperators Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950939 4949 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950949 4949 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950960 4949 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950971 4949 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950981 4949 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.950991 4949 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951001 4949 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951014 4949 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951027 4949 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951039 4949 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951050 4949 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951061 4949 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951070 4949 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951080 4949 feature_gate.go:330] unrecognized feature gate: InsightsConfig Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951090 4949 feature_gate.go:330] unrecognized feature gate: PinnedImages Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951100 4949 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951110 4949 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951122 4949 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.951139 4949 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951469 4949 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951490 4949 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951503 4949 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951516 4949 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951557 4949 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951569 4949 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951579 4949 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951590 4949 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951601 4949 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951612 4949 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951623 4949 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951636 4949 feature_gate.go:351] Setting deprecated feature gate 
KMSv1=true. It will be removed in a future release. Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951651 4949 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951662 4949 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951673 4949 feature_gate.go:330] unrecognized feature gate: PinnedImages Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951683 4949 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951693 4949 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951703 4949 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951713 4949 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951723 4949 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951733 4949 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951743 4949 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951757 4949 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951770 4949 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951781 4949 feature_gate.go:330] unrecognized feature gate: GatewayAPI Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951791 4949 feature_gate.go:330] unrecognized feature gate: SignatureStores Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951802 4949 feature_gate.go:330] unrecognized feature gate: NewOLM Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951812 4949 feature_gate.go:330] unrecognized feature gate: InsightsConfig Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951827 4949 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951841 4949 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951852 4949 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951862 4949 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951874 4949 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951886 4949 feature_gate.go:330] unrecognized feature gate: OVNObservability Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951901 4949 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951911 4949 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951921 4949 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951930 4949 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951941 4949 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951951 4949 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951961 4949 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951971 4949 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951981 4949 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.951992 4949 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.952003 4949 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.952013 4949 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.952023 4949 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.952033 4949 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.952043 4949 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.952053 4949 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.952063 4949 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.952074 4949 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.952084 4949 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.952094 4949 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Feb 16 11:06:50 crc 
kubenswrapper[4949]: W0216 11:06:50.952104 4949 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.952115 4949 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.952125 4949 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.952135 4949 feature_gate.go:330] unrecognized feature gate: PlatformOperators Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.952144 4949 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.952154 4949 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.952164 4949 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.952209 4949 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.952220 4949 feature_gate.go:330] unrecognized feature gate: Example Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.952230 4949 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.952245 4949 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.952256 4949 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.952269 4949 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.952280 4949 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.952291 4949 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.952302 4949 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Feb 16 11:06:50 crc kubenswrapper[4949]: W0216 11:06:50.952315 4949 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.952331 4949 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.952656 4949 server.go:940] "Client rotation is on, will bootstrap in background" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.958376 4949 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary" Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.958767 4949 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem". 
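[Editor's note] At this point the kubelet loads its client pair from /var/lib/kubelet/pki/kubelet-client-current.pem and, as the entries just below show, starts rotation with an expiration of 2026-02-24 and a rotation deadline in 2025-12-16; the first CSR attempt then fails with "connection refused" because the API server at api-int.crc.testing:6443 is not up yet, which is expected this early in boot. To read the same expiry locally, a sketch assuming the third-party cryptography package is installed:

    # Sketch: print the expiry of the kubelet client certificate.
    import re
    from pathlib import Path
    from cryptography import x509

    pem = Path("/var/lib/kubelet/pki/kubelet-client-current.pem").read_bytes()
    # The file concatenates the certificate and the private key; pull out
    # just the CERTIFICATE block before handing it to the x509 parser.
    block = re.search(
        rb"-----BEGIN CERTIFICATE-----.*?-----END CERTIFICATE-----", pem, re.S
    )
    assert block, "no CERTIFICATE block found"
    cert = x509.load_pem_x509_certificate(block.group(0))
    # Older cryptography releases expose not_valid_after instead.
    print("not valid after:", cert.not_valid_after_utc)

The printed timestamp should match the "Certificate expiration is ..." entry below; the kubelet retries the signing request in the background once the API server starts answering.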
Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.960502 4949 server.go:997] "Starting client certificate rotation"
Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.960566 4949 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.971471 4949 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-12-16 18:29:44.975250248 +0000 UTC
Feb 16 11:06:50 crc kubenswrapper[4949]: I0216 11:06:50.971637 4949 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.001291 4949 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Feb 16 11:06:51 crc kubenswrapper[4949]: E0216 11:06:51.005659 4949 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.129:6443: connect: connection refused" logger="UnhandledError"
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.021124 4949 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.035551 4949 log.go:25] "Validated CRI v1 runtime API"
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.097543 4949 log.go:25] "Validated CRI v1 image API"
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.099497 4949 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.105217 4949 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2026-02-16-11-02-23-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.105251 4949 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}]
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.124794 4949 manager.go:217] Machine: {Timestamp:2026-02-16 11:06:51.121439608 +0000 UTC m=+0.750773823 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2800000 MemoryCapacity:33654124544 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:fcf7eef6-e236-4c8e-bd9c-41b70a7621ed BootID:bc77a723-71f9-4f4a-b80e-2feb50c63f04 Filesystems:[{Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 Capacity:1073741824 Type:vfs Inodes:4108169 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827060224 Type:vfs Inodes:4108169 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365408768 Type:vfs Inodes:821633 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:ce:4a:28 Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:ce:4a:28 Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:df:52:97 Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:4e:9b:f9 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:ae:55:e9 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:45:e5:56 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:d6:f5:c4:4c:db:83 Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:7a:35:85:a3:78:5b Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654124544 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.125088 4949 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.125330 4949 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.137007 4949 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.137390 4949 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.137439 4949 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.137692 4949 topology_manager.go:138] "Creating topology manager with none policy"
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.137707 4949 container_manager_linux.go:303] "Creating device plugin manager"
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.138358 4949 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.138400 4949 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.138729 4949 state_mem.go:36] "Initialized new in-memory state store"
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.138841 4949 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.145451 4949 kubelet.go:418] "Attempting to sync node with API server"
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.145481 4949 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.145512 4949 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.145528 4949 kubelet.go:324] "Adding apiserver pod source"
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.145566 4949 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.155675 4949 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.156753 4949 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Feb 16 11:06:51 crc kubenswrapper[4949]: W0216 11:06:51.157220 4949 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.129:6443: connect: connection refused
Feb 16 11:06:51 crc kubenswrapper[4949]: W0216 11:06:51.157280 4949 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.129:6443: connect: connection refused
Feb 16 11:06:51 crc kubenswrapper[4949]: E0216 11:06:51.157420 4949 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.129:6443: connect: connection refused" logger="UnhandledError"
Feb 16 11:06:51 crc kubenswrapper[4949]: E0216 11:06:51.157340 4949 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.129:6443: connect: connection refused" logger="UnhandledError"
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.158534 4949 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.160278 4949 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.160301 4949 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.160310 4949 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.160319 4949 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.160332 4949 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.160357 4949 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.160365 4949 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.160376 4949 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.160385 4949 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.160393 4949 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.160403 4949 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.160410 4949 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.169008 4949 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.169962 4949 server.go:1280] "Started kubelet"
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.171909 4949 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.171937 4949 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.172508 4949 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.129:6443: connect: connection refused
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.172779 4949 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Feb 16 11:06:51 crc systemd[1]: Started Kubernetes Kubelet.
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.174427 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.174475 4949 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.174590 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-12 10:51:36.294586838 +0000 UTC
Feb 16 11:06:51 crc kubenswrapper[4949]: E0216 11:06:51.174638 4949 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.174758 4949 volume_manager.go:287] "The desired_state_of_world populator starts"
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.174799 4949 volume_manager.go:289] "Starting Kubelet Volume Manager"
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.174865 4949 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Feb 16 11:06:51 crc kubenswrapper[4949]: W0216 11:06:51.175436 4949 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.129:6443: connect: connection refused
Feb 16 11:06:51 crc kubenswrapper[4949]: E0216 11:06:51.175551 4949 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.129:6443: connect: connection refused" logger="UnhandledError"
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.176243 4949 factory.go:153] Registering CRI-O factory
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.177452 4949 factory.go:221] Registration of the crio container factory successfully
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.177612 4949 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.177720 4949 factory.go:55] Registering systemd factory
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.177833 4949 factory.go:221] Registration of the systemd container factory successfully
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.177901 4949 factory.go:103] Registering Raw factory
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.177971 4949 manager.go:1196] Started watching for new ooms in manager
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.178520 4949 manager.go:319] Starting recovery of all containers
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.178541 4949 server.go:460] "Adding debug handlers to kubelet server"
Feb 16 11:06:51 crc kubenswrapper[4949]: E0216 11:06:51.181139 4949 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.129:6443: connect: connection refused" interval="200ms"
Feb 16 11:06:51 crc kubenswrapper[4949]: E0216 11:06:51.206565 4949 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.129:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.1894b567ddbe13ee default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-16 11:06:51.169928174 +0000 UTC m=+0.799262349,LastTimestamp:2026-02-16 11:06:51.169928174 +0000 UTC m=+0.799262349,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.213532 4949 manager.go:324] Recovery completed
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.214603 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.214695 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.214731 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.214760 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.214786 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.214813 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.214841 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.214869 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.214901 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.214960 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.214988 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.215019 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.215049 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.215079 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.215103 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.215127 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.215154 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.215245 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.215275 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.215300 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.215326 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.215358 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.215387 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.215414 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.215440 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.215464 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.215522 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.215551 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.215573 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.215594 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.215614 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.215688 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.215713 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.215734 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.215754 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.215773 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.215793 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.215812 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.215890 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.215936 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.215956 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.215978 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.216002 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.216021 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.216040 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.216060 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.216080 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.216099 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.216120 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.216140 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.216158 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.216212 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.216248 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.216278 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.216332 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.216361 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.216394 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.216461 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.216491 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.216517 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.216544 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.216570 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.216598 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.216623 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.216651 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.216677 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.216701 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.216726 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.216755 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.216781 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.216805 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.216832 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.216856 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.216895 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.216923 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.216947 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.216970 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.216994 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.217021 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.217043 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.217067 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.217092 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.217118 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.217143 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.217201 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.217229 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.217255 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.217283 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.217309 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.217332 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.217356 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.217384 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.217413 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.217445 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.217477 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.217509 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.217538 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.217566 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.217595 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.217624 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.217656 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.217689 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.217719 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.217763 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.217803 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.217845 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.217878 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.217905 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.217935 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.217965 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.217992 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.218021 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.218052 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.218078 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.218106 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.218134 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.218161 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.220460 4949 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount"
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.220521 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.220548 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.220568 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.220619 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.220651 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.220757 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.220839 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.220888 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.220940 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221009 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221040 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221061 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221081 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221102 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221123 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221142 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221161 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221222 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221250 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221274 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221294 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221313 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext=""
Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221333 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod=""
podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221352 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221371 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221390 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221411 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221431 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221451 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221472 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221490 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221569 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221625 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221640 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" 
volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221654 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221667 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221679 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221689 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221716 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221725 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221735 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221745 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221756 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221765 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221774 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" 
volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221784 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221793 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221804 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221815 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221825 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221835 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221845 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221865 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221880 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221893 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221905 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221915 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221924 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221933 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221943 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221953 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221962 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221971 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221981 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.221997 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.222007 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.222016 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.222027 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.222036 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.222045 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.222055 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.222065 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.222075 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.222085 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.222094 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.222103 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.222113 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.222122 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.222130 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.222141 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.222152 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.222162 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.222185 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.222197 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.222209 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.222222 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.222232 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.222241 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.222252 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.222261 4949 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.222271 4949 reconstruct.go:97] "Volume reconstruction finished" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.222279 4949 reconciler.go:26] "Reconciler: start to sync state" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.222779 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.224316 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.224351 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.224362 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.225215 4949 cpu_manager.go:225] "Starting CPU manager" policy="none" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.225233 4949 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.225252 4949 state_mem.go:36] "Initialized new in-memory state store" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.232339 4949 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.233877 4949 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.233931 4949 status_manager.go:217] "Starting to sync pod status with apiserver" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.233965 4949 kubelet.go:2335] "Starting kubelet main sync loop" Feb 16 11:06:51 crc kubenswrapper[4949]: E0216 11:06:51.234023 4949 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Feb 16 11:06:51 crc kubenswrapper[4949]: W0216 11:06:51.234693 4949 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.129:6443: connect: connection refused Feb 16 11:06:51 crc kubenswrapper[4949]: E0216 11:06:51.234754 4949 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.129:6443: connect: connection refused" logger="UnhandledError" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.240253 4949 policy_none.go:49] "None policy: Start" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.242898 4949 memory_manager.go:170] "Starting memorymanager" policy="None" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.242921 4949 state_mem.go:35] "Initializing new in-memory state store" Feb 16 11:06:51 crc kubenswrapper[4949]: E0216 11:06:51.274965 4949 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.295395 4949 manager.go:334] "Starting Device Plugin manager" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.295650 4949 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.295749 4949 server.go:79] "Starting device plugin registration server" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.296151 4949 eviction_manager.go:189] "Eviction manager: starting control loop" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.296260 4949 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.296678 4949 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.296750 4949 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.296757 4949 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Feb 16 11:06:51 crc kubenswrapper[4949]: E0216 11:06:51.304333 4949 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.334312 4949 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc"] Feb 16 11:06:51 crc kubenswrapper[4949]: 
I0216 11:06:51.334439 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.336841 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.336867 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.336879 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.337003 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.337184 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.337244 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.338902 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.338970 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.339003 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.340878 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.340908 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.340918 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.341054 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.341287 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd/etcd-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.341325 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.342907 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.342936 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.342966 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.343063 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.343080 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.343121 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.343289 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.343556 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.343699 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.345098 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.345119 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.345130 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.345140 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.345191 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.345225 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.345449 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.345656 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.346271 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.349464 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.349486 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.349496 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.349704 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.349732 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.350187 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.350210 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.350219 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.350485 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.350506 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.350523 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:06:51 crc kubenswrapper[4949]: E0216 11:06:51.381797 4949 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.129:6443: connect: connection refused" interval="400ms" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.397294 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.398848 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.398899 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.398912 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.398929 4949 kubelet_node_status.go:76] "Attempting to register node" node="crc" Feb 16 11:06:51 crc kubenswrapper[4949]: E0216 11:06:51.399326 4949 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.129:6443: connect: 
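
Two retry loops are visible here besides the event spam: the node-lease heartbeat (controller.go: "Failed to ensure lease exists, will retry" at a 400ms interval, against the Lease object in kube-node-lease) and node registration (kubelet_node_status.go: "Attempting to register node" then "Unable to register node with API server"). The repeated NodeHasSufficientMemory / NodeHasNoDiskPressure / NodeHasSufficientPID triplets are re-recorded on every static pod admission pass, not a sign of node flapping. Once the apiserver answers, the renewed lease is what keeps the node Ready; a sketch for inspecting it afterwards (assumes client-go in go.mod and a working kubeconfig in $KUBECONFIG):

```go
// Sketch (assumes client-go and a reachable cluster): read the node lease
// the kubelet above keeps retrying to ensure, to confirm heartbeats.
package main

import (
	"context"
	"fmt"
	"os"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG"))
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// The kubelet heartbeats by renewing this Lease; RenewTime should
	// advance every few seconds once registration finally succeeds.
	lease, err := cs.CoordinationV1().Leases("kube-node-lease").
		Get(context.TODO(), "crc", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	if lease.Spec.HolderIdentity != nil {
		fmt.Println("holder:", *lease.Spec.HolderIdentity)
	}
	fmt.Println("renewed:", lease.Spec.RenewTime)
}
```
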
connection refused" node="crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.425020 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.425049 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.425070 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.425087 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.425105 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.425118 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.425151 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.425190 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.425209 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.425226 4949 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.425270 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.425299 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.425317 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.425332 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.425347 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.526610 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.526680 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.526715 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.526745 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.526771 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.526798 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.526827 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.526855 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.526863 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.526934 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.526883 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.526997 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.527031 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.527058 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: 
\"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.527065 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.527084 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.527110 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.527094 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.527109 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.527150 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.526960 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.527187 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.527164 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.527061 4949 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.527034 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.527213 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.527229 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.527213 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.527252 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.527135 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.600213 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.601527 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.601594 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.601618 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.601665 4949 kubelet_node_status.go:76] "Attempting to register node" node="crc" Feb 16 11:06:51 crc kubenswrapper[4949]: E0216 11:06:51.602290 4949 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 
38.102.83.129:6443: connect: connection refused" node="crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.678681 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.684071 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.704119 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: W0216 11:06:51.721274 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-01ac21fe516635cfa03f86d211442f4a5e70c5dfc222252e95afe89ffe7fa832 WatchSource:0}: Error finding container 01ac21fe516635cfa03f86d211442f4a5e70c5dfc222252e95afe89ffe7fa832: Status 404 returned error can't find the container with id 01ac21fe516635cfa03f86d211442f4a5e70c5dfc222252e95afe89ffe7fa832 Feb 16 11:06:51 crc kubenswrapper[4949]: W0216 11:06:51.723520 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-2dbac59b27aa8eb14fe35e1ebcd3cb57e9ca9c97e03322048afd79d42523e557 WatchSource:0}: Error finding container 2dbac59b27aa8eb14fe35e1ebcd3cb57e9ca9c97e03322048afd79d42523e557: Status 404 returned error can't find the container with id 2dbac59b27aa8eb14fe35e1ebcd3cb57e9ca9c97e03322048afd79d42523e557 Feb 16 11:06:51 crc kubenswrapper[4949]: W0216 11:06:51.725995 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-caa9a04107a77064805e29a5ddf7af07c72d1b58d3259f3e6f9308e66796a39e WatchSource:0}: Error finding container caa9a04107a77064805e29a5ddf7af07c72d1b58d3259f3e6f9308e66796a39e: Status 404 returned error can't find the container with id caa9a04107a77064805e29a5ddf7af07c72d1b58d3259f3e6f9308e66796a39e Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.727842 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: I0216 11:06:51.738753 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Feb 16 11:06:51 crc kubenswrapper[4949]: W0216 11:06:51.758361 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-a3dd929d2f5cd36420feb69b681d087eb4c8c4a54d50e264ea1ac6d9244556f6 WatchSource:0}: Error finding container a3dd929d2f5cd36420feb69b681d087eb4c8c4a54d50e264ea1ac6d9244556f6: Status 404 returned error can't find the container with id a3dd929d2f5cd36420feb69b681d087eb4c8c4a54d50e264ea1ac6d9244556f6 Feb 16 11:06:51 crc kubenswrapper[4949]: E0216 11:06:51.783256 4949 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.129:6443: connect: connection refused" interval="800ms" Feb 16 11:06:52 crc kubenswrapper[4949]: I0216 11:06:52.003316 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:06:52 crc kubenswrapper[4949]: I0216 11:06:52.005152 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:06:52 crc kubenswrapper[4949]: I0216 11:06:52.005221 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:06:52 crc kubenswrapper[4949]: I0216 11:06:52.005237 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:06:52 crc kubenswrapper[4949]: I0216 11:06:52.005266 4949 kubelet_node_status.go:76] "Attempting to register node" node="crc" Feb 16 11:06:52 crc kubenswrapper[4949]: E0216 11:06:52.005716 4949 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.129:6443: connect: connection refused" node="crc" Feb 16 11:06:52 crc kubenswrapper[4949]: W0216 11:06:52.071949 4949 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.129:6443: connect: connection refused Feb 16 11:06:52 crc kubenswrapper[4949]: E0216 11:06:52.072039 4949 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.129:6443: connect: connection refused" logger="UnhandledError" Feb 16 11:06:52 crc kubenswrapper[4949]: I0216 11:06:52.173655 4949 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.129:6443: connect: connection refused Feb 16 11:06:52 crc kubenswrapper[4949]: I0216 11:06:52.175747 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-16 09:53:40.476551467 +0000 UTC Feb 16 11:06:52 crc kubenswrapper[4949]: W0216 11:06:52.227455 4949 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get 
"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.129:6443: connect: connection refused Feb 16 11:06:52 crc kubenswrapper[4949]: E0216 11:06:52.227527 4949 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.129:6443: connect: connection refused" logger="UnhandledError" Feb 16 11:06:52 crc kubenswrapper[4949]: I0216 11:06:52.237932 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"a3dd929d2f5cd36420feb69b681d087eb4c8c4a54d50e264ea1ac6d9244556f6"} Feb 16 11:06:52 crc kubenswrapper[4949]: I0216 11:06:52.238807 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"dd14c12e09c008b64eca08cec1afe8c1fc7c8b0d15792a9875800aec8bc3d1ac"} Feb 16 11:06:52 crc kubenswrapper[4949]: I0216 11:06:52.239543 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"caa9a04107a77064805e29a5ddf7af07c72d1b58d3259f3e6f9308e66796a39e"} Feb 16 11:06:52 crc kubenswrapper[4949]: I0216 11:06:52.240392 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"2dbac59b27aa8eb14fe35e1ebcd3cb57e9ca9c97e03322048afd79d42523e557"} Feb 16 11:06:52 crc kubenswrapper[4949]: I0216 11:06:52.241114 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"01ac21fe516635cfa03f86d211442f4a5e70c5dfc222252e95afe89ffe7fa832"} Feb 16 11:06:52 crc kubenswrapper[4949]: W0216 11:06:52.344641 4949 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.129:6443: connect: connection refused Feb 16 11:06:52 crc kubenswrapper[4949]: E0216 11:06:52.344787 4949 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.129:6443: connect: connection refused" logger="UnhandledError" Feb 16 11:06:52 crc kubenswrapper[4949]: W0216 11:06:52.390957 4949 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.129:6443: connect: connection refused Feb 16 11:06:52 crc kubenswrapper[4949]: E0216 11:06:52.391035 4949 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 
38.102.83.129:6443: connect: connection refused" logger="UnhandledError" Feb 16 11:06:52 crc kubenswrapper[4949]: E0216 11:06:52.585360 4949 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.129:6443: connect: connection refused" interval="1.6s" Feb 16 11:06:52 crc kubenswrapper[4949]: I0216 11:06:52.806448 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:06:52 crc kubenswrapper[4949]: I0216 11:06:52.807752 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:06:52 crc kubenswrapper[4949]: I0216 11:06:52.807781 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:06:52 crc kubenswrapper[4949]: I0216 11:06:52.807789 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:06:52 crc kubenswrapper[4949]: I0216 11:06:52.807808 4949 kubelet_node_status.go:76] "Attempting to register node" node="crc" Feb 16 11:06:52 crc kubenswrapper[4949]: E0216 11:06:52.808163 4949 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.129:6443: connect: connection refused" node="crc" Feb 16 11:06:53 crc kubenswrapper[4949]: I0216 11:06:53.058876 4949 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Feb 16 11:06:53 crc kubenswrapper[4949]: E0216 11:06:53.060513 4949 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.129:6443: connect: connection refused" logger="UnhandledError" Feb 16 11:06:53 crc kubenswrapper[4949]: I0216 11:06:53.173898 4949 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.129:6443: connect: connection refused Feb 16 11:06:53 crc kubenswrapper[4949]: I0216 11:06:53.176006 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-05 19:35:39.61331149 +0000 UTC Feb 16 11:06:53 crc kubenswrapper[4949]: I0216 11:06:53.246198 4949 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503" exitCode=0 Feb 16 11:06:53 crc kubenswrapper[4949]: I0216 11:06:53.246335 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:06:53 crc kubenswrapper[4949]: I0216 11:06:53.246518 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503"} Feb 16 11:06:53 crc kubenswrapper[4949]: I0216 11:06:53.247135 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Feb 16 11:06:53 crc kubenswrapper[4949]: I0216 11:06:53.247203 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:06:53 crc kubenswrapper[4949]: I0216 11:06:53.247217 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:06:53 crc kubenswrapper[4949]: I0216 11:06:53.249205 4949 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="2565364211de3c6939cdb3ca8da3aa7ef8000d9f3f06bcd16e29c4c42a34382f" exitCode=0 Feb 16 11:06:53 crc kubenswrapper[4949]: I0216 11:06:53.249275 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"2565364211de3c6939cdb3ca8da3aa7ef8000d9f3f06bcd16e29c4c42a34382f"} Feb 16 11:06:53 crc kubenswrapper[4949]: I0216 11:06:53.249463 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:06:53 crc kubenswrapper[4949]: I0216 11:06:53.249769 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:06:53 crc kubenswrapper[4949]: I0216 11:06:53.250536 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:06:53 crc kubenswrapper[4949]: I0216 11:06:53.250590 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:06:53 crc kubenswrapper[4949]: I0216 11:06:53.250608 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:06:53 crc kubenswrapper[4949]: I0216 11:06:53.250747 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:06:53 crc kubenswrapper[4949]: I0216 11:06:53.250869 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:06:53 crc kubenswrapper[4949]: I0216 11:06:53.250925 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:06:53 crc kubenswrapper[4949]: I0216 11:06:53.252737 4949 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="cfced29d64a36faa7fb01921de2fa10276f96c8bda9ca028442ff93523a0e3e6" exitCode=0 Feb 16 11:06:53 crc kubenswrapper[4949]: I0216 11:06:53.252818 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"cfced29d64a36faa7fb01921de2fa10276f96c8bda9ca028442ff93523a0e3e6"} Feb 16 11:06:53 crc kubenswrapper[4949]: I0216 11:06:53.252918 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:06:53 crc kubenswrapper[4949]: I0216 11:06:53.254267 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:06:53 crc kubenswrapper[4949]: I0216 11:06:53.254308 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:06:53 crc kubenswrapper[4949]: I0216 11:06:53.254326 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 
11:06:53 crc kubenswrapper[4949]: I0216 11:06:53.260999 4949 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="f2ffd2fb061548169d3abaf8402343cafbc3e5ae03a850efdbd2d58416044185" exitCode=0 Feb 16 11:06:53 crc kubenswrapper[4949]: I0216 11:06:53.261106 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"f2ffd2fb061548169d3abaf8402343cafbc3e5ae03a850efdbd2d58416044185"} Feb 16 11:06:53 crc kubenswrapper[4949]: I0216 11:06:53.261118 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:06:53 crc kubenswrapper[4949]: I0216 11:06:53.262084 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:06:53 crc kubenswrapper[4949]: I0216 11:06:53.262103 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:06:53 crc kubenswrapper[4949]: I0216 11:06:53.262111 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:06:53 crc kubenswrapper[4949]: I0216 11:06:53.267258 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"2296e3c50624ffcfe10eb50ae71f715af1868bfa777c60068d3ef55a2544af00"} Feb 16 11:06:53 crc kubenswrapper[4949]: I0216 11:06:53.267303 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"d9f247fc563e96b2d5c1e866afe8eef00f61520018ad001e6b02cffac286d3ea"} Feb 16 11:06:53 crc kubenswrapper[4949]: I0216 11:06:53.267318 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"afad26039cce493d107df9286cf3268dfc5f76d20b86bb34a36ef7742b8419bf"} Feb 16 11:06:53 crc kubenswrapper[4949]: I0216 11:06:53.267329 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"68519f3c9bfc45ad28f92e2cf0c28a9413821784aafd91ab65a311259ed6ecf6"} Feb 16 11:06:53 crc kubenswrapper[4949]: I0216 11:06:53.267418 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:06:53 crc kubenswrapper[4949]: I0216 11:06:53.271944 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:06:53 crc kubenswrapper[4949]: I0216 11:06:53.272072 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:06:53 crc kubenswrapper[4949]: I0216 11:06:53.272117 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:06:54 crc kubenswrapper[4949]: W0216 11:06:54.123754 4949 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 
38.102.83.129:6443: connect: connection refused Feb 16 11:06:54 crc kubenswrapper[4949]: E0216 11:06:54.123845 4949 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.129:6443: connect: connection refused" logger="UnhandledError" Feb 16 11:06:54 crc kubenswrapper[4949]: I0216 11:06:54.173903 4949 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.129:6443: connect: connection refused Feb 16 11:06:54 crc kubenswrapper[4949]: I0216 11:06:54.176235 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-08 13:38:17.732046121 +0000 UTC Feb 16 11:06:54 crc kubenswrapper[4949]: E0216 11:06:54.186259 4949 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.129:6443: connect: connection refused" interval="3.2s" Feb 16 11:06:54 crc kubenswrapper[4949]: I0216 11:06:54.279811 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"cb35c1de6926d4a26d187408bff5edd0286e53989f9b76e8c589428b397f9f13"} Feb 16 11:06:54 crc kubenswrapper[4949]: I0216 11:06:54.279861 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"e29b88847d04a477cf98987a7df615c6de0a6d9749cf7bdac9569f10360fc5e7"} Feb 16 11:06:54 crc kubenswrapper[4949]: I0216 11:06:54.279875 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"9b0f44fbd5ac8ebc2187917178b0999e0eb7837a1c097691c9667669486f5a01"} Feb 16 11:06:54 crc kubenswrapper[4949]: I0216 11:06:54.279966 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:06:54 crc kubenswrapper[4949]: I0216 11:06:54.280865 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:06:54 crc kubenswrapper[4949]: I0216 11:06:54.280903 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:06:54 crc kubenswrapper[4949]: I0216 11:06:54.280918 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:06:54 crc kubenswrapper[4949]: I0216 11:06:54.283864 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49"} Feb 16 11:06:54 crc kubenswrapper[4949]: I0216 11:06:54.283911 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec"} Feb 16 11:06:54 crc kubenswrapper[4949]: I0216 11:06:54.283923 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809"} Feb 16 11:06:54 crc kubenswrapper[4949]: I0216 11:06:54.283931 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:06:54 crc kubenswrapper[4949]: I0216 11:06:54.283937 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd"} Feb 16 11:06:54 crc kubenswrapper[4949]: I0216 11:06:54.284837 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:06:54 crc kubenswrapper[4949]: I0216 11:06:54.284870 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:06:54 crc kubenswrapper[4949]: I0216 11:06:54.284884 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:06:54 crc kubenswrapper[4949]: I0216 11:06:54.287998 4949 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="0baef348b9c85059c5f4a738bfb7b349840cbd4e8961bb6af2316f33c4a261de" exitCode=0 Feb 16 11:06:54 crc kubenswrapper[4949]: I0216 11:06:54.288086 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"0baef348b9c85059c5f4a738bfb7b349840cbd4e8961bb6af2316f33c4a261de"} Feb 16 11:06:54 crc kubenswrapper[4949]: I0216 11:06:54.290717 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:06:54 crc kubenswrapper[4949]: I0216 11:06:54.293760 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:06:54 crc kubenswrapper[4949]: I0216 11:06:54.293794 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:06:54 crc kubenswrapper[4949]: I0216 11:06:54.293806 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:06:54 crc kubenswrapper[4949]: I0216 11:06:54.296767 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:06:54 crc kubenswrapper[4949]: I0216 11:06:54.297286 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:06:54 crc kubenswrapper[4949]: I0216 11:06:54.297455 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"26a3c0d2164b3d6a615dce1de93b44a4a85b1831d656a20e5ce11909fad6d776"} Feb 16 11:06:54 crc kubenswrapper[4949]: I0216 11:06:54.299649 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:06:54 crc 
kubenswrapper[4949]: I0216 11:06:54.299672 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:06:54 crc kubenswrapper[4949]: I0216 11:06:54.299680 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:06:54 crc kubenswrapper[4949]: I0216 11:06:54.299701 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:06:54 crc kubenswrapper[4949]: I0216 11:06:54.299735 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:06:54 crc kubenswrapper[4949]: I0216 11:06:54.299748 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:06:54 crc kubenswrapper[4949]: I0216 11:06:54.409023 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:06:54 crc kubenswrapper[4949]: I0216 11:06:54.410502 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:06:54 crc kubenswrapper[4949]: I0216 11:06:54.410537 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:06:54 crc kubenswrapper[4949]: I0216 11:06:54.410547 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:06:54 crc kubenswrapper[4949]: I0216 11:06:54.410573 4949 kubelet_node_status.go:76] "Attempting to register node" node="crc" Feb 16 11:06:54 crc kubenswrapper[4949]: E0216 11:06:54.410994 4949 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.129:6443: connect: connection refused" node="crc" Feb 16 11:06:54 crc kubenswrapper[4949]: W0216 11:06:54.504371 4949 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.129:6443: connect: connection refused Feb 16 11:06:54 crc kubenswrapper[4949]: E0216 11:06:54.504436 4949 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.129:6443: connect: connection refused" logger="UnhandledError" Feb 16 11:06:54 crc kubenswrapper[4949]: I0216 11:06:54.776295 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 16 11:06:54 crc kubenswrapper[4949]: I0216 11:06:54.784752 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 16 11:06:55 crc kubenswrapper[4949]: I0216 11:06:55.176912 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-15 17:31:21.102719607 +0000 UTC Feb 16 11:06:55 crc kubenswrapper[4949]: I0216 11:06:55.301538 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Feb 
16 11:06:55 crc kubenswrapper[4949]: I0216 11:06:55.304042 4949 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="5f69e0a9b42aa74bc12bf3d79b1464a31fbbec6be4c39ff694e3bdf51ab50fea" exitCode=255 Feb 16 11:06:55 crc kubenswrapper[4949]: I0216 11:06:55.304100 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"5f69e0a9b42aa74bc12bf3d79b1464a31fbbec6be4c39ff694e3bdf51ab50fea"} Feb 16 11:06:55 crc kubenswrapper[4949]: I0216 11:06:55.304276 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:06:55 crc kubenswrapper[4949]: I0216 11:06:55.305647 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:06:55 crc kubenswrapper[4949]: I0216 11:06:55.305695 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:06:55 crc kubenswrapper[4949]: I0216 11:06:55.305713 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:06:55 crc kubenswrapper[4949]: I0216 11:06:55.306644 4949 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="d5345be27a2db9d73776d5ec051b1412aa7c60480b016f7d7299ca39cbcb571e" exitCode=0 Feb 16 11:06:55 crc kubenswrapper[4949]: I0216 11:06:55.306765 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:06:55 crc kubenswrapper[4949]: I0216 11:06:55.306844 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"d5345be27a2db9d73776d5ec051b1412aa7c60480b016f7d7299ca39cbcb571e"} Feb 16 11:06:55 crc kubenswrapper[4949]: I0216 11:06:55.306880 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:06:55 crc kubenswrapper[4949]: I0216 11:06:55.306943 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Feb 16 11:06:55 crc kubenswrapper[4949]: I0216 11:06:55.306947 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:06:55 crc kubenswrapper[4949]: I0216 11:06:55.306858 4949 scope.go:117] "RemoveContainer" containerID="5f69e0a9b42aa74bc12bf3d79b1464a31fbbec6be4c39ff694e3bdf51ab50fea" Feb 16 11:06:55 crc kubenswrapper[4949]: I0216 11:06:55.307806 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:06:55 crc kubenswrapper[4949]: I0216 11:06:55.307833 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:06:55 crc kubenswrapper[4949]: I0216 11:06:55.307843 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:06:55 crc kubenswrapper[4949]: I0216 11:06:55.308115 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:06:55 crc kubenswrapper[4949]: I0216 11:06:55.309157 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:06:55 crc kubenswrapper[4949]: I0216 
11:06:55.309234 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:06:55 crc kubenswrapper[4949]: I0216 11:06:55.309257 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:06:55 crc kubenswrapper[4949]: I0216 11:06:55.309571 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:06:55 crc kubenswrapper[4949]: I0216 11:06:55.309603 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:06:55 crc kubenswrapper[4949]: I0216 11:06:55.309619 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:06:55 crc kubenswrapper[4949]: I0216 11:06:55.309888 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:06:55 crc kubenswrapper[4949]: I0216 11:06:55.309921 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:06:55 crc kubenswrapper[4949]: I0216 11:06:55.309936 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:06:56 crc kubenswrapper[4949]: I0216 11:06:56.184551 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-17 10:34:18.221017492 +0000 UTC Feb 16 11:06:56 crc kubenswrapper[4949]: I0216 11:06:56.294198 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 16 11:06:56 crc kubenswrapper[4949]: I0216 11:06:56.466758 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"e62021c4bd9f8a880bfd35a31c9467b889b9c60f997315b43bd91c627e048741"} Feb 16 11:06:56 crc kubenswrapper[4949]: I0216 11:06:56.466828 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"f0c804bf50897f22680438dffe08ed43976a8727d4e47dd061d38544200bd650"} Feb 16 11:06:56 crc kubenswrapper[4949]: I0216 11:06:56.466842 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"104ad7368ab72a073b763f3cd93153248595865a4924f64217e8c6bcbb0eb7c2"} Feb 16 11:06:56 crc kubenswrapper[4949]: I0216 11:06:56.471766 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Feb 16 11:06:56 crc kubenswrapper[4949]: I0216 11:06:56.624611 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546"} Feb 16 11:06:56 crc kubenswrapper[4949]: I0216 11:06:56.624811 4949 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Feb 16 11:06:56 crc kubenswrapper[4949]: I0216 11:06:56.624867 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:06:56 crc 
kubenswrapper[4949]: I0216 11:06:56.624948 4949 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Feb 16 11:06:56 crc kubenswrapper[4949]: I0216 11:06:56.624982 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:06:56 crc kubenswrapper[4949]: I0216 11:06:56.624868 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:06:56 crc kubenswrapper[4949]: I0216 11:06:56.627793 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:06:56 crc kubenswrapper[4949]: I0216 11:06:56.627829 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:06:56 crc kubenswrapper[4949]: I0216 11:06:56.627853 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:06:56 crc kubenswrapper[4949]: I0216 11:06:56.628092 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:06:56 crc kubenswrapper[4949]: I0216 11:06:56.628164 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:06:56 crc kubenswrapper[4949]: I0216 11:06:56.628230 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:06:56 crc kubenswrapper[4949]: I0216 11:06:56.630062 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:06:56 crc kubenswrapper[4949]: I0216 11:06:56.630099 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:06:56 crc kubenswrapper[4949]: I0216 11:06:56.630111 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:06:56 crc kubenswrapper[4949]: I0216 11:06:56.985460 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 16 11:06:57 crc kubenswrapper[4949]: I0216 11:06:57.185419 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-06 06:53:07.788716401 +0000 UTC Feb 16 11:06:57 crc kubenswrapper[4949]: I0216 11:06:57.405138 4949 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Feb 16 11:06:57 crc kubenswrapper[4949]: I0216 11:06:57.405775 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 16 11:06:57 crc kubenswrapper[4949]: I0216 11:06:57.611966 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:06:57 crc kubenswrapper[4949]: I0216 11:06:57.613368 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:06:57 crc kubenswrapper[4949]: I0216 11:06:57.613417 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:06:57 crc kubenswrapper[4949]: I0216 11:06:57.613433 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:06:57 crc kubenswrapper[4949]: I0216 11:06:57.613465 4949 
kubelet_node_status.go:76] "Attempting to register node" node="crc" Feb 16 11:06:57 crc kubenswrapper[4949]: I0216 11:06:57.629921 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"68f3394a9c364d944bc7f4575d64344b2a5c1484ff4eea28eafc2aa7062b3895"} Feb 16 11:06:57 crc kubenswrapper[4949]: I0216 11:06:57.629960 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"9013e8e3d6a5ec4e245ffa66030573caddb88a4e3d54462f7e4f69980364ab66"} Feb 16 11:06:57 crc kubenswrapper[4949]: I0216 11:06:57.629965 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:06:57 crc kubenswrapper[4949]: I0216 11:06:57.630024 4949 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Feb 16 11:06:57 crc kubenswrapper[4949]: I0216 11:06:57.630044 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:06:57 crc kubenswrapper[4949]: I0216 11:06:57.629970 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:06:57 crc kubenswrapper[4949]: I0216 11:06:57.631206 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:06:57 crc kubenswrapper[4949]: I0216 11:06:57.631251 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:06:57 crc kubenswrapper[4949]: I0216 11:06:57.631273 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:06:57 crc kubenswrapper[4949]: I0216 11:06:57.631252 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:06:57 crc kubenswrapper[4949]: I0216 11:06:57.631315 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:06:57 crc kubenswrapper[4949]: I0216 11:06:57.631329 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:06:57 crc kubenswrapper[4949]: I0216 11:06:57.631352 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:06:57 crc kubenswrapper[4949]: I0216 11:06:57.631384 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:06:57 crc kubenswrapper[4949]: I0216 11:06:57.631395 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:06:58 crc kubenswrapper[4949]: I0216 11:06:58.186802 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-18 19:59:56.175452093 +0000 UTC Feb 16 11:06:58 crc kubenswrapper[4949]: I0216 11:06:58.346449 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 16 11:06:58 crc kubenswrapper[4949]: I0216 11:06:58.526491 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 16 11:06:58 crc kubenswrapper[4949]: I0216 11:06:58.632270 4949 
kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:06:58 crc kubenswrapper[4949]: I0216 11:06:58.632291 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:06:58 crc kubenswrapper[4949]: I0216 11:06:58.632460 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:06:58 crc kubenswrapper[4949]: I0216 11:06:58.633245 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:06:58 crc kubenswrapper[4949]: I0216 11:06:58.633270 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:06:58 crc kubenswrapper[4949]: I0216 11:06:58.633281 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:06:58 crc kubenswrapper[4949]: I0216 11:06:58.633294 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:06:58 crc kubenswrapper[4949]: I0216 11:06:58.633512 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:06:58 crc kubenswrapper[4949]: I0216 11:06:58.633552 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:06:58 crc kubenswrapper[4949]: I0216 11:06:58.638503 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:06:58 crc kubenswrapper[4949]: I0216 11:06:58.638529 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:06:58 crc kubenswrapper[4949]: I0216 11:06:58.638538 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:06:59 crc kubenswrapper[4949]: I0216 11:06:59.187064 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-16 19:39:13.030981298 +0000 UTC Feb 16 11:06:59 crc kubenswrapper[4949]: I0216 11:06:59.294288 4949 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Feb 16 11:06:59 crc kubenswrapper[4949]: I0216 11:06:59.294385 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Feb 16 11:06:59 crc kubenswrapper[4949]: I0216 11:06:59.471743 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Feb 16 11:06:59 crc kubenswrapper[4949]: I0216 11:06:59.635115 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:06:59 crc kubenswrapper[4949]: I0216 11:06:59.635279 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume 
controller attach/detach" Feb 16 11:06:59 crc kubenswrapper[4949]: I0216 11:06:59.636637 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:06:59 crc kubenswrapper[4949]: I0216 11:06:59.636686 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:06:59 crc kubenswrapper[4949]: I0216 11:06:59.636703 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:06:59 crc kubenswrapper[4949]: I0216 11:06:59.637217 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:06:59 crc kubenswrapper[4949]: I0216 11:06:59.637277 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:06:59 crc kubenswrapper[4949]: I0216 11:06:59.637293 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:00 crc kubenswrapper[4949]: I0216 11:07:00.187220 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-02 14:06:24.956938572 +0000 UTC Feb 16 11:07:01 crc kubenswrapper[4949]: I0216 11:07:01.188328 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-01 09:36:16.6220638 +0000 UTC Feb 16 11:07:01 crc kubenswrapper[4949]: E0216 11:07:01.304694 4949 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Feb 16 11:07:01 crc kubenswrapper[4949]: I0216 11:07:01.781327 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 16 11:07:01 crc kubenswrapper[4949]: I0216 11:07:01.781579 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:07:01 crc kubenswrapper[4949]: I0216 11:07:01.782961 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:01 crc kubenswrapper[4949]: I0216 11:07:01.783000 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:01 crc kubenswrapper[4949]: I0216 11:07:01.783010 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:02 crc kubenswrapper[4949]: I0216 11:07:02.188593 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-14 19:18:23.485486267 +0000 UTC Feb 16 11:07:03 crc kubenswrapper[4949]: I0216 11:07:03.189223 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-28 11:12:24.461264685 +0000 UTC Feb 16 11:07:04 crc kubenswrapper[4949]: I0216 11:07:04.189346 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-09 02:27:20.796483559 +0000 UTC Feb 16 11:07:05 crc kubenswrapper[4949]: W0216 11:07:05.158408 4949 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get 
"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": net/http: TLS handshake timeout Feb 16 11:07:05 crc kubenswrapper[4949]: I0216 11:07:05.158837 4949 trace.go:236] Trace[1043049132]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (16-Feb-2026 11:06:55.156) (total time: 10002ms): Feb 16 11:07:05 crc kubenswrapper[4949]: Trace[1043049132]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (11:07:05.158) Feb 16 11:07:05 crc kubenswrapper[4949]: Trace[1043049132]: [10.002057327s] [10.002057327s] END Feb 16 11:07:05 crc kubenswrapper[4949]: E0216 11:07:05.159091 4949 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Feb 16 11:07:05 crc kubenswrapper[4949]: I0216 11:07:05.173975 4949 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout Feb 16 11:07:05 crc kubenswrapper[4949]: W0216 11:07:05.183285 4949 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": net/http: TLS handshake timeout Feb 16 11:07:05 crc kubenswrapper[4949]: I0216 11:07:05.183619 4949 trace.go:236] Trace[63360462]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (16-Feb-2026 11:06:55.182) (total time: 10001ms): Feb 16 11:07:05 crc kubenswrapper[4949]: Trace[63360462]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (11:07:05.183) Feb 16 11:07:05 crc kubenswrapper[4949]: Trace[63360462]: [10.001391722s] [10.001391722s] END Feb 16 11:07:05 crc kubenswrapper[4949]: E0216 11:07:05.183792 4949 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Feb 16 11:07:05 crc kubenswrapper[4949]: I0216 11:07:05.190371 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-23 01:14:36.253369624 +0000 UTC Feb 16 11:07:05 crc kubenswrapper[4949]: I0216 11:07:05.618791 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Feb 16 11:07:05 crc kubenswrapper[4949]: I0216 11:07:05.619253 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:07:05 crc kubenswrapper[4949]: I0216 11:07:05.620700 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:05 crc kubenswrapper[4949]: I0216 11:07:05.620744 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:05 crc 
kubenswrapper[4949]: I0216 11:07:05.620754 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:05 crc kubenswrapper[4949]: I0216 11:07:05.673100 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Feb 16 11:07:05 crc kubenswrapper[4949]: I0216 11:07:05.673316 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:07:05 crc kubenswrapper[4949]: I0216 11:07:05.674366 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:05 crc kubenswrapper[4949]: I0216 11:07:05.674394 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:05 crc kubenswrapper[4949]: I0216 11:07:05.674406 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:05 crc kubenswrapper[4949]: I0216 11:07:05.688817 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Feb 16 11:07:06 crc kubenswrapper[4949]: I0216 11:07:06.001746 4949 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Feb 16 11:07:06 crc kubenswrapper[4949]: I0216 11:07:06.001815 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Feb 16 11:07:06 crc kubenswrapper[4949]: I0216 11:07:06.005400 4949 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Feb 16 11:07:06 crc kubenswrapper[4949]: I0216 11:07:06.005464 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Feb 16 11:07:06 crc kubenswrapper[4949]: I0216 11:07:06.191087 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-09 17:27:23.313103127 +0000 UTC Feb 16 11:07:06 crc kubenswrapper[4949]: I0216 11:07:06.653104 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:07:06 crc kubenswrapper[4949]: I0216 11:07:06.653900 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:06 crc kubenswrapper[4949]: I0216 11:07:06.653924 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:06 crc kubenswrapper[4949]: I0216 11:07:06.653934 4949 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:06 crc kubenswrapper[4949]: I0216 11:07:06.993693 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 16 11:07:06 crc kubenswrapper[4949]: I0216 11:07:06.993950 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:07:06 crc kubenswrapper[4949]: I0216 11:07:06.995528 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:06 crc kubenswrapper[4949]: I0216 11:07:06.995698 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:06 crc kubenswrapper[4949]: I0216 11:07:06.995814 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:07 crc kubenswrapper[4949]: I0216 11:07:07.191954 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-17 02:26:46.421897635 +0000 UTC Feb 16 11:07:08 crc kubenswrapper[4949]: I0216 11:07:08.192319 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-05 11:59:11.890509951 +0000 UTC Feb 16 11:07:08 crc kubenswrapper[4949]: I0216 11:07:08.351628 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 16 11:07:08 crc kubenswrapper[4949]: I0216 11:07:08.351769 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:07:08 crc kubenswrapper[4949]: I0216 11:07:08.353089 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:08 crc kubenswrapper[4949]: I0216 11:07:08.353240 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:08 crc kubenswrapper[4949]: I0216 11:07:08.353316 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:08 crc kubenswrapper[4949]: I0216 11:07:08.356059 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 16 11:07:08 crc kubenswrapper[4949]: I0216 11:07:08.658356 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:07:08 crc kubenswrapper[4949]: I0216 11:07:08.659527 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:08 crc kubenswrapper[4949]: I0216 11:07:08.659570 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:08 crc kubenswrapper[4949]: I0216 11:07:08.659580 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:09 crc kubenswrapper[4949]: I0216 11:07:09.192588 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-17 08:25:42.787629457 +0000 UTC Feb 16 11:07:09 crc kubenswrapper[4949]: I0216 11:07:09.296266 4949 patch_prober.go:28] 
interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Feb 16 11:07:09 crc kubenswrapper[4949]: I0216 11:07:09.296402 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Feb 16 11:07:10 crc kubenswrapper[4949]: I0216 11:07:10.113568 4949 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Feb 16 11:07:10 crc kubenswrapper[4949]: I0216 11:07:10.192846 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-15 16:24:27.354203604 +0000 UTC Feb 16 11:07:10 crc kubenswrapper[4949]: E0216 11:07:10.997970 4949 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="6.4s" Feb 16 11:07:10 crc kubenswrapper[4949]: I0216 11:07:10.998355 4949 trace.go:236] Trace[924381367]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (16-Feb-2026 11:07:00.585) (total time: 10413ms): Feb 16 11:07:10 crc kubenswrapper[4949]: Trace[924381367]: ---"Objects listed" error: 10413ms (11:07:10.998) Feb 16 11:07:10 crc kubenswrapper[4949]: Trace[924381367]: [10.41308076s] [10.41308076s] END Feb 16 11:07:10 crc kubenswrapper[4949]: I0216 11:07:10.998373 4949 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Feb 16 11:07:11 crc kubenswrapper[4949]: I0216 11:07:11.006497 4949 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146 Feb 16 11:07:11 crc kubenswrapper[4949]: E0216 11:07:11.008540 4949 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc" Feb 16 11:07:11 crc kubenswrapper[4949]: I0216 11:07:11.009146 4949 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Feb 16 11:07:11 crc kubenswrapper[4949]: I0216 11:07:11.011984 4949 trace.go:236] Trace[478342485]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (16-Feb-2026 11:06:58.956) (total time: 12055ms): Feb 16 11:07:11 crc kubenswrapper[4949]: Trace[478342485]: ---"Objects listed" error: 12055ms (11:07:11.011) Feb 16 11:07:11 crc kubenswrapper[4949]: Trace[478342485]: [12.055165574s] [12.055165574s] END Feb 16 11:07:11 crc kubenswrapper[4949]: I0216 11:07:11.012284 4949 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Feb 16 11:07:11 crc kubenswrapper[4949]: I0216 11:07:11.194077 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-10 03:36:49.584506544 +0000 UTC Feb 16 11:07:11 crc kubenswrapper[4949]: 
I0216 11:07:11.265650 4949 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:38722->192.168.126.11:17697: read: connection reset by peer" start-of-body= Feb 16 11:07:11 crc kubenswrapper[4949]: I0216 11:07:11.265666 4949 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:38726->192.168.126.11:17697: read: connection reset by peer" start-of-body= Feb 16 11:07:11 crc kubenswrapper[4949]: I0216 11:07:11.265824 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:38726->192.168.126.11:17697: read: connection reset by peer" Feb 16 11:07:11 crc kubenswrapper[4949]: I0216 11:07:11.265753 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:38722->192.168.126.11:17697: read: connection reset by peer" Feb 16 11:07:11 crc kubenswrapper[4949]: I0216 11:07:11.266493 4949 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Feb 16 11:07:11 crc kubenswrapper[4949]: I0216 11:07:11.266691 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Feb 16 11:07:11 crc kubenswrapper[4949]: I0216 11:07:11.267142 4949 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Feb 16 11:07:11 crc kubenswrapper[4949]: I0216 11:07:11.267217 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Feb 16 11:07:11 crc kubenswrapper[4949]: E0216 11:07:11.304781 4949 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Feb 16 11:07:11 crc kubenswrapper[4949]: I0216 11:07:11.425282 4949 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Feb 16 11:07:11 crc kubenswrapper[4949]: I0216 11:07:11.669263 4949 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Feb 16 11:07:11 crc kubenswrapper[4949]: I0216 11:07:11.669831 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Feb 16 11:07:11 crc kubenswrapper[4949]: I0216 11:07:11.671561 4949 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546" exitCode=255 Feb 16 11:07:11 crc kubenswrapper[4949]: I0216 11:07:11.671624 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546"} Feb 16 11:07:11 crc kubenswrapper[4949]: I0216 11:07:11.671703 4949 scope.go:117] "RemoveContainer" containerID="5f69e0a9b42aa74bc12bf3d79b1464a31fbbec6be4c39ff694e3bdf51ab50fea" Feb 16 11:07:11 crc kubenswrapper[4949]: I0216 11:07:11.694786 4949 scope.go:117] "RemoveContainer" containerID="7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546" Feb 16 11:07:11 crc kubenswrapper[4949]: E0216 11:07:11.695008 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.162263 4949 apiserver.go:52] "Watching apiserver" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.164939 4949 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.165291 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-kube-apiserver/kube-apiserver-crc","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c"] Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.165680 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.165758 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.165812 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:07:12 crc kubenswrapper[4949]: E0216 11:07:12.166571 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.166822 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Feb 16 11:07:12 crc kubenswrapper[4949]: E0216 11:07:12.166907 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.168466 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.168606 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:07:12 crc kubenswrapper[4949]: E0216 11:07:12.168664 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.168559 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.170724 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.171855 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.171887 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.172112 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.172206 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.173812 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.174277 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.174275 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.175691 4949 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.195019 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-20 15:43:21.791522293 +0000 UTC Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.203475 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.217253 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.217303 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.217341 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.217370 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.217391 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.217420 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.217441 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " 
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.217464 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.217484 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.217508 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.217537 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.217560 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.217582 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.217603 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.217624 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.217645 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.217666 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.217692 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.217704 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue ""
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.217716 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.217737 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.217762 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.217783 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.217805 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.217827 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.217849 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.217873 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.217898 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.217921 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.217965 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.218003 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.218025 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.218052 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.218100 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.218120 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.218144 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.218170 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.218206 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.218225 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.218245 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.218268 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.218290 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.218314 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.218335 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.218358 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.218381 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.218405 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.218426 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.218445 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.218476 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.218594 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.218624 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.218692 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.218714 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.218734 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.218755 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.218775 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.218797 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.218819 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.218844 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.218870 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.218896 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.218939 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.218966 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.218990 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.219031 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.219058 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.219087 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.219119 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.219147 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.219192 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.219220 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.219245 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.219268 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.219293 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.219317 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.219341 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.219364 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.219389 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.219413 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.219436 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.219458 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.219491 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.219519 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.219544 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.219571 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.219596 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.219619 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.219644 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.219668 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.219695 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.219722 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.219746 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.219771 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.219794 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.219820 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.219846 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.219870 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.219894 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.219919 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.219944 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.219969 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.219993 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.220017 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.220044 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.220068 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.220093 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.220116 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.220142 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.220188 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.220217 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.220241 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.220266 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.220291 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.220315 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") "
Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.220342 4949
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.220365 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.220392 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.220415 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.220439 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.220465 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.220488 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.220512 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.220537 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.220560 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.220583 4949 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.220608 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.220632 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.220654 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.220679 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.220704 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.220731 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.220757 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.220783 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.220806 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: 
\"6509e943-70c6-444c-bc41-48a544e36fbd\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.220833 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.220857 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.220882 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.220910 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.220934 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.220959 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.220985 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.221019 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.221044 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.221067 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: 
\"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.221090 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.221114 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.221140 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.221164 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.221295 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.221322 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.221346 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.221370 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.221395 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.221418 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod 
\"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.221442 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.221463 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.221484 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.221505 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.221528 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.221550 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.221574 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.221597 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.221621 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.221645 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" 
(UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.221668 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.221692 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.221715 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.221742 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.221767 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.221811 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.221837 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.221864 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.221889 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.221914 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Feb 16 11:07:12 crc 
kubenswrapper[4949]: I0216 11:07:12.221938 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.221963 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.222016 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.222040 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.222065 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.222090 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.222116 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.222141 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.222189 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.222216 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod 
\"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.222242 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.222267 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.222299 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.222324 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.222349 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.222372 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.222398 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.222423 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.222446 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.222470 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod 
\"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.222494 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.222548 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.222585 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.222615 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.222639 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.222695 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.222722 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.222746 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.222771 4949 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.222796 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.222822 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.222846 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.222873 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.222897 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.222921 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.222976 4949 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.223505 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.217922 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.218854 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.219033 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.219500 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.219674 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.219685 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.219849 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.219937 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.220123 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.220305 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.220420 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.220483 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.220574 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.220663 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.220820 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.220967 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.221013 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.221138 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.221356 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.221533 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.222059 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.223772 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.223828 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.224110 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.224392 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.224568 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.224727 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). 
InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.224910 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.225066 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.225380 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.225562 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.225985 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.226686 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.226878 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.227020 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). 
InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.227449 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.227611 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.227789 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.227973 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.228133 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.228467 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.228746 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.229377 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.229529 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.229797 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.229976 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.231237 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.231469 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.231483 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.231644 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.231699 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.231810 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.231965 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.232130 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.232241 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.232295 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.232544 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.232647 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). 
InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.232772 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.232876 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.233019 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.233049 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.233220 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: E0216 11:07:12.239469 4949 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Feb 16 11:07:12 crc kubenswrapper[4949]: E0216 11:07:12.239581 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-16 11:07:12.739557218 +0000 UTC m=+22.368891393 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Feb 16 11:07:12 crc kubenswrapper[4949]: E0216 11:07:12.239689 4949 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 16 11:07:12 crc kubenswrapper[4949]: E0216 11:07:12.239724 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-16 11:07:12.739715142 +0000 UTC m=+22.369049327 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.240344 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.240791 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.241130 4949 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.241306 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.241596 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". 
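
Note: the two E0216 failures above occur because, this early after kubelet startup, the referenced ConfigMap and Secret are not yet registered in the kubelet's object cache, so nestedpendingoperations parks each mount and forbids retries for 500ms (the durationBeforeRetry). The "m=+22.36..." suffix on the retry timestamp is Go's monotonic clock reading, roughly 22 seconds since the kubelet process started. A sketch of the per-operation backoff pattern; the doubling and the cap are assumptions for illustration, not kubelet's exact values:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        delay := 500 * time.Millisecond  // durationBeforeRetry from the log
        const maxDelay = 2 * time.Minute // assumed cap for illustration

        start := time.Now()
        for attempt := 1; attempt <= 5; attempt++ {
            retryAt := start.Add(delay)
            fmt.Printf("attempt %d failed; no retries permitted until %s (durationBeforeRetry %s)\n",
                attempt, retryAt.Format(time.RFC3339Nano), delay)
            delay *= 2 // back off further after each failure
            if delay > maxDelay {
                delay = maxDelay
            }
        }
    }
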
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.241709 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.241916 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.241986 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.242016 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.242563 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.242710 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.242799 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.248055 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.252710 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.253516 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.254050 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.254410 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.256454 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.256855 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.257459 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.258587 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.259296 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.259340 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.259820 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.260289 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.260817 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.261148 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.261295 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.261683 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.261617 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.261909 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.262002 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.262249 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.262565 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.262849 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.263286 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). 
InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.263676 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.263996 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.264268 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.264367 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.264501 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.264773 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.265224 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.265476 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.265558 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.265826 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.265920 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.266192 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.266336 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.266537 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.267338 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.267643 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.267917 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.268265 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.268801 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.269011 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.271440 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.271617 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.272002 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). 
InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.272498 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.268443 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.268488 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.274644 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.274663 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.275205 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.275524 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.275934 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". 
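
Note: the status_manager failure above is a bootstrap ordering problem, not data corruption: the kubelet patches the pod's status subresource, the API server forwards the object to the pod.network-node-identity.openshift.io webhook, and the webhook endpoint at 127.0.0.1:9743 is not listening yet, so the whole patch is rejected with connection refused. The operation is retried once the webhook pod (whose volumes are being mounted above) comes up. Roughly what the kubelet is doing, expressed with client-go; a sketch with names taken from the log, a reduced patch body, and minimal error handling:

    package main

    import (
        "context"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/types"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/rest"
    )

    func main() {
        cfg, err := rest.InClusterConfig()
        if err != nil {
            panic(err)
        }
        cs, err := kubernetes.NewForConfig(cfg)
        if err != nil {
            panic(err)
        }
        // A reduced version of the conditions patch from the log line above.
        patch := []byte(`{"status":{"conditions":[{"type":"Ready","status":"False","reason":"ContainersNotReady"}]}}`)
        _, err = cs.CoreV1().Pods("openshift-network-operator").Patch(
            context.TODO(), "iptables-alerter-4ln5h",
            types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "status")
        if err != nil {
            panic(err) // with the webhook down this fails: connection refused
        }
    }
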
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.276351 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.276648 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.280332 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.280549 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.280726 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.280730 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: E0216 11:07:12.281058 4949 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 16 11:07:12 crc kubenswrapper[4949]: E0216 11:07:12.281128 4949 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 16 11:07:12 crc kubenswrapper[4949]: E0216 11:07:12.281145 4949 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 16 11:07:12 crc kubenswrapper[4949]: E0216 11:07:12.281234 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-02-16 11:07:12.78121176 +0000 UTC m=+22.410546145 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.281483 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.281785 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.284489 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.284605 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.284991 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.286189 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.286617 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.286634 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Feb 16 11:07:12 crc kubenswrapper[4949]: E0216 11:07:12.286784 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:07:12.78676107 +0000 UTC m=+22.416095235 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.287161 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.287562 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.288375 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.291693 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.292582 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.292901 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.292914 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.293135 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.293384 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.293666 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). 
InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.293883 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.294730 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.296785 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.296941 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.297130 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.297571 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.297596 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.297704 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.298457 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.298597 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.298876 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.299505 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.299960 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.300019 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.300352 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.300558 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.300873 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.300899 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.301022 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.301026 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.301508 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.301508 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.301625 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.301625 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). 
InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.301794 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.302041 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.302008 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.302160 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.302434 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.302451 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: E0216 11:07:12.303162 4949 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 16 11:07:12 crc kubenswrapper[4949]: E0216 11:07:12.303221 4949 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 16 11:07:12 crc kubenswrapper[4949]: E0216 11:07:12.303240 4949 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.303261 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: E0216 11:07:12.303317 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-02-16 11:07:12.803288306 +0000 UTC m=+22.432622681 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.303336 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.303585 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.303975 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.304252 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.305246 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.307378 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.307580 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.308022 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.308067 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.308239 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.308389 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). 
InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.309157 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.316685 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.316702 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.319759 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aada2690-8d5f-4854-bc83-59906010e8ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5f69e0a9b42aa74bc12bf3d79b1464a31fbbec6be4c39ff694e3bdf51ab50fea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-16T11:06:54Z\\\",\\\"message\\\":\\\"W0216 11:06:54.357337 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0216 
11:06:54.357708 1 crypto.go:601] Generating new CA for check-endpoints-signer@1771240014 cert, and key in /tmp/serving-cert-4158916687/serving-signer.crt, /tmp/serving-cert-4158916687/serving-signer.key\\\\nI0216 11:06:54.543593 1 observer_polling.go:159] Starting file observer\\\\nW0216 11:06:54.547331 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0216 11:06:54.547472 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0216 11:06:54.549234 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4158916687/tls.crt::/tmp/serving-cert-4158916687/tls.key\\\\\\\"\\\\nF0216 11:06:54.808450 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-16T11:07:11Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0216 11:07:05.819935 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0216 11:07:05.820709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2063297113/tls.crt::/tmp/serving-cert-2063297113/tls.key\\\\\\\"\\\\nI0216 11:07:11.242838 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0216 11:07:11.246739 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0216 11:07:11.246759 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0216 11:07:11.246780 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0216 11:07:11.246785 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0216 11:07:11.257100 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0216 11:07:11.257140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0216 11:07:11.257148 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0216 11:07:11.257148 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0216 11:07:11.257158 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0216 11:07:11.257195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0216 11:07:11.257203 1 secure_serving.go:69] Use of insecure cipher 
'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0216 11:07:11.257208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0216 11:07:11.259242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.323316 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.323738 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.323818 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.323902 4949 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.323921 4949 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.323932 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.323945 4949 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.323954 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.323964 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.323972 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.323981 4949 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.323990 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324000 4949 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: 
\"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324011 4949 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324021 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324030 4949 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324039 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324047 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324057 4949 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324065 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324075 4949 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324083 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324092 4949 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324101 4949 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324109 4949 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324118 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: 
\"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324127 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324135 4949 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324144 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324153 4949 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324161 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324188 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324197 4949 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324209 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324218 4949 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324226 4949 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324236 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324247 4949 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324266 4949 reconciler_common.go:293] "Volume detached 
for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324277 4949 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324322 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324333 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324342 4949 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324351 4949 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324383 4949 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324392 4949 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324401 4949 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324413 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324424 4949 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324465 4949 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324480 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324490 4949 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: 
\"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324504 4949 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324514 4949 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324523 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324532 4949 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324541 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324552 4949 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324560 4949 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324569 4949 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324578 4949 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324588 4949 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324597 4949 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324607 4949 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324618 4949 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: 
\"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324628 4949 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324638 4949 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324647 4949 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324657 4949 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324667 4949 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324676 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324686 4949 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324695 4949 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324704 4949 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324713 4949 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324723 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324734 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324744 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" 
DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324754 4949 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324763 4949 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324774 4949 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324784 4949 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324794 4949 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324803 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324814 4949 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324824 4949 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324833 4949 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324843 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324853 4949 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324863 4949 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324873 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on 
node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324882 4949 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324892 4949 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324903 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324912 4949 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324921 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324931 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324941 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324950 4949 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324959 4949 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324967 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324976 4949 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324985 4949 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.324994 4949 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: 
\"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325003 4949 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325014 4949 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325023 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325035 4949 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325044 4949 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325054 4949 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325062 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325071 4949 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325081 4949 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325090 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325100 4949 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325109 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325119 4949 
reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325128 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325140 4949 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325194 4949 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325206 4949 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325216 4949 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325225 4949 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325234 4949 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325244 4949 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325252 4949 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325261 4949 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325270 4949 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325282 4949 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325291 4949 
reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325300 4949 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325309 4949 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325317 4949 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325326 4949 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325335 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325343 4949 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325352 4949 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325360 4949 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325368 4949 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325376 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325385 4949 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325394 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325402 4949 reconciler_common.go:293] 
"Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325417 4949 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325476 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325485 4949 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325493 4949 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325502 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325512 4949 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325519 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325530 4949 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325537 4949 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325546 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325554 4949 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325563 4949 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325571 4949 reconciler_common.go:293] "Volume detached for 
volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325583 4949 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325591 4949 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325600 4949 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325609 4949 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325618 4949 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325627 4949 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325636 4949 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325645 4949 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325654 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325662 4949 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325670 4949 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325678 4949 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325687 4949 
reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325695 4949 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325703 4949 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325711 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325720 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325728 4949 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325737 4949 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325745 4949 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325753 4949 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325761 4949 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325769 4949 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325778 4949 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325786 4949 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325795 4949 
reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325804 4949 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325812 4949 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325820 4949 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325828 4949 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325838 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325846 4949 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325894 4949 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325925 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325941 4949 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325954 4949 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.325969 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.326167 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Feb 16 11:07:12 crc 
kubenswrapper[4949]: I0216 11:07:12.326223 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.344537 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.348684 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.350973 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.355573 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.369772 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.376722 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.389271 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.430463 4949 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.430512 4949 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.430523 4949 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.487806 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.497063 4949 csr.go:261] certificate signing request csr-rd52n is approved, waiting to be issued Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.498195 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Feb 16 11:07:12 crc kubenswrapper[4949]: W0216 11:07:12.505396 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a5e44f_9a88_4405_be8a_b645485e7312.slice/crio-56c40fb042c875dcbb6aab2bdef468f22639a18b7fb7d2f6fc51ca6313577af7 WatchSource:0}: Error finding container 56c40fb042c875dcbb6aab2bdef468f22639a18b7fb7d2f6fc51ca6313577af7: Status 404 returned error can't find the container with id 56c40fb042c875dcbb6aab2bdef468f22639a18b7fb7d2f6fc51ca6313577af7 Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.509731 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Feb 16 11:07:12 crc kubenswrapper[4949]: W0216 11:07:12.523434 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd75a4c96_2883_4a0b_bab2_0fab2b6c0b49.slice/crio-f37e66c7b830feebf77ec07534aea3326822551c92821d8ea817f2291a3341a8 WatchSource:0}: Error finding container f37e66c7b830feebf77ec07534aea3326822551c92821d8ea817f2291a3341a8: Status 404 returned error can't find the container with id f37e66c7b830feebf77ec07534aea3326822551c92821d8ea817f2291a3341a8 Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.528291 4949 csr.go:257] certificate signing request csr-rd52n is issued Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.588368 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-kxn9z"] Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.592258 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-kxn9z" Feb 16 11:07:12 crc kubenswrapper[4949]: W0216 11:07:12.598034 4949 reflector.go:561] object-"openshift-dns"/"node-resolver-dockercfg-kz9s7": failed to list *v1.Secret: secrets "node-resolver-dockercfg-kz9s7" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-dns": no relationship found between node 'crc' and this object Feb 16 11:07:12 crc kubenswrapper[4949]: E0216 11:07:12.598079 4949 reflector.go:158] "Unhandled Error" err="object-\"openshift-dns\"/\"node-resolver-dockercfg-kz9s7\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"node-resolver-dockercfg-kz9s7\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-dns\": no relationship found between node 'crc' and this object" logger="UnhandledError" Feb 16 11:07:12 crc kubenswrapper[4949]: W0216 11:07:12.598253 4949 reflector.go:561] object-"openshift-dns"/"openshift-service-ca.crt": failed to list *v1.ConfigMap: configmaps "openshift-service-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-dns": no relationship found between node 'crc' and this object Feb 16 11:07:12 crc kubenswrapper[4949]: E0216 11:07:12.598269 4949 reflector.go:158] "Unhandled Error" err="object-\"openshift-dns\"/\"openshift-service-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-service-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-dns\": no relationship found between node 'crc' and this object" logger="UnhandledError" Feb 16 11:07:12 crc kubenswrapper[4949]: W0216 11:07:12.598451 4949 reflector.go:561] object-"openshift-dns"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-dns": no relationship found between node 'crc' and this object Feb 16 11:07:12 crc kubenswrapper[4949]: E0216 11:07:12.598546 4949 reflector.go:158] "Unhandled Error" err="object-\"openshift-dns\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-dns\": no 
relationship found between node 'crc' and this object" logger="UnhandledError" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.618090 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.639062 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.681686 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.701762 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.702376 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.710967 4949 scope.go:117] "RemoveContainer" containerID="7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546" Feb 16 11:07:12 crc kubenswrapper[4949]: E0216 11:07:12.711112 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.712709 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"f37e66c7b830feebf77ec07534aea3326822551c92821d8ea817f2291a3341a8"} Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.715591 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"0343c4bf5a2a2685d5e88dc1fb8d5f9bbd7766a2f75e352efd79a48a288342da"} Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.722146 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container 
could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.726891 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"56c40fb042c875dcbb6aab2bdef468f22639a18b7fb7d2f6fc51ca6313577af7"} Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.733370 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.735332 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/cc3f701c-2094-4818-871c-547fc5636a55-hosts-file\") pod \"node-resolver-kxn9z\" (UID: \"cc3f701c-2094-4818-871c-547fc5636a55\") " pod="openshift-dns/node-resolver-kxn9z" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.735445 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rld7r\" (UniqueName: \"kubernetes.io/projected/cc3f701c-2094-4818-871c-547fc5636a55-kube-api-access-rld7r\") pod \"node-resolver-kxn9z\" (UID: \"cc3f701c-2094-4818-871c-547fc5636a55\") " pod="openshift-dns/node-resolver-kxn9z" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.741607 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kxn9z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc3f701c-2094-4818-871c-547fc5636a55\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rld7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kxn9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.753702 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aada2690-8d5f-4854-bc83-59906010e8ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5f69e0a9b42aa74bc12bf3d79b1464a31fbbec6be4c39ff694e3bdf51ab50fea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-16T11:06:54Z\\\",\\\"message\\\":\\\"W0216 11:06:54.357337 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0216 
11:06:54.357708 1 crypto.go:601] Generating new CA for check-endpoints-signer@1771240014 cert, and key in /tmp/serving-cert-4158916687/serving-signer.crt, /tmp/serving-cert-4158916687/serving-signer.key\\\\nI0216 11:06:54.543593 1 observer_polling.go:159] Starting file observer\\\\nW0216 11:06:54.547331 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0216 11:06:54.547472 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0216 11:06:54.549234 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4158916687/tls.crt::/tmp/serving-cert-4158916687/tls.key\\\\\\\"\\\\nF0216 11:06:54.808450 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-16T11:07:11Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0216 11:07:05.819935 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0216 11:07:05.820709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2063297113/tls.crt::/tmp/serving-cert-2063297113/tls.key\\\\\\\"\\\\nI0216 11:07:11.242838 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0216 11:07:11.246739 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0216 11:07:11.246759 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0216 11:07:11.246780 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0216 11:07:11.246785 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0216 11:07:11.257100 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0216 11:07:11.257140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0216 11:07:11.257148 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0216 11:07:11.257148 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0216 11:07:11.257158 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0216 11:07:11.257195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0216 11:07:11.257203 1 secure_serving.go:69] Use of insecure cipher 
'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0216 11:07:11.257208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0216 11:07:11.259242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.787972 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.800839 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.810235 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.818796 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.832020 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kxn9z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc3f701c-2094-4818-871c-547fc5636a55\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rld7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kxn9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 16 11:07:12 crc 
kubenswrapper[4949]: I0216 11:07:12.836245 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.836320 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rld7r\" (UniqueName: \"kubernetes.io/projected/cc3f701c-2094-4818-871c-547fc5636a55-kube-api-access-rld7r\") pod \"node-resolver-kxn9z\" (UID: \"cc3f701c-2094-4818-871c-547fc5636a55\") " pod="openshift-dns/node-resolver-kxn9z" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.836351 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.836377 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/cc3f701c-2094-4818-871c-547fc5636a55-hosts-file\") pod \"node-resolver-kxn9z\" (UID: \"cc3f701c-2094-4818-871c-547fc5636a55\") " pod="openshift-dns/node-resolver-kxn9z" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.836407 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:07:12 crc kubenswrapper[4949]: E0216 11:07:12.836429 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:07:13.836403478 +0000 UTC m=+23.465737643 (durationBeforeRetry 1s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.836480 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:07:12 crc kubenswrapper[4949]: E0216 11:07:12.836522 4949 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 16 11:07:12 crc kubenswrapper[4949]: E0216 11:07:12.836542 4949 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 16 11:07:12 crc kubenswrapper[4949]: E0216 11:07:12.836563 4949 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 16 11:07:12 crc kubenswrapper[4949]: E0216 11:07:12.836570 4949 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Feb 16 11:07:12 crc kubenswrapper[4949]: E0216 11:07:12.836585 4949 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.836522 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:07:12 crc kubenswrapper[4949]: E0216 11:07:12.836620 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-16 11:07:13.836609063 +0000 UTC m=+23.465943228 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Feb 16 11:07:12 crc kubenswrapper[4949]: E0216 11:07:12.836574 4949 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 16 11:07:12 crc kubenswrapper[4949]: E0216 11:07:12.836640 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-16 11:07:13.836633304 +0000 UTC m=+23.465967469 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.836521 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/cc3f701c-2094-4818-871c-547fc5636a55-hosts-file\") pod \"node-resolver-kxn9z\" (UID: \"cc3f701c-2094-4818-871c-547fc5636a55\") " pod="openshift-dns/node-resolver-kxn9z" Feb 16 11:07:12 crc kubenswrapper[4949]: E0216 11:07:12.836669 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-02-16 11:07:13.836660074 +0000 UTC m=+23.465994339 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 16 11:07:12 crc kubenswrapper[4949]: E0216 11:07:12.836545 4949 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 16 11:07:12 crc kubenswrapper[4949]: E0216 11:07:12.836703 4949 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 16 11:07:12 crc kubenswrapper[4949]: E0216 11:07:12.836781 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. 
No retries permitted until 2026-02-16 11:07:13.836740476 +0000 UTC m=+23.466074651 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.843258 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aada2690-8d5f-4854-bc83-59906010e8ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cr
i-o://b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-16T11:07:11Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0216 11:07:05.819935 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0216 11:07:05.820709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2063297113/tls.crt::/tmp/serving-cert-2063297113/tls.key\\\\\\\"\\\\nI0216 11:07:11.242838 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0216 11:07:11.246739 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0216 11:07:11.246759 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0216 11:07:11.246780 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0216 11:07:11.246785 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0216 11:07:11.257100 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0216 11:07:11.257140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0216 11:07:11.257148 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0216 11:07:11.257148 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0216 11:07:11.257158 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0216 11:07:11.257195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0216 11:07:11.257203 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0216 11:07:11.257208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0216 11:07:11.259242 1 cmd.go:182] pods 
\\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.865215 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 16 11:07:12 crc kubenswrapper[4949]: I0216 11:07:12.880006 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.195227 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-16 05:08:08.618574113 +0000 UTC Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.238285 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.239050 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.239843 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.240599 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.241222 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Feb 16 11:07:13 crc 
kubenswrapper[4949]: I0216 11:07:13.241725 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.242388 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.242896 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.243603 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.244202 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.244759 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.245451 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.245941 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.246577 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.247100 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.247640 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.248887 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.249345 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.249945 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.250586 4949 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.251069 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.251665 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.252137 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.252820 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.253221 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.253810 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.254518 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.255040 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.255683 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.257606 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.258588 4949 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.258730 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.260508 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.261104 4949 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.261632 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.263124 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.265721 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.266365 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.267611 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.268444 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.269490 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.270285 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.271505 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.272556 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.272991 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.273858 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.274363 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.275503 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod 
volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.275946 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.276460 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.277301 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.277893 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.279367 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.279967 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.418104 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-jsmls"] Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.418402 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.428805 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-gfr2q"] Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.429709 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.432480 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.432721 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.436697 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.436958 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-vjrxd"] Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.437604 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.439636 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.439662 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.439678 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-26lss"] Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.440382 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-26lss" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.449429 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.450005 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.450009 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.450023 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.450047 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.450099 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.450215 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.450232 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.450432 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.450482 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.450586 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.450673 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.450733 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.450990 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.476959 4949 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.488068 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.501408 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.525791 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aada2690-8d5f-4854-bc83-59906010e8ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-16T11:07:11Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0216 11:07:05.819935 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0216 11:07:05.820709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2063297113/tls.crt::/tmp/serving-cert-2063297113/tls.key\\\\\\\"\\\\nI0216 11:07:11.242838 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0216 11:07:11.246739 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0216 11:07:11.246759 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0216 11:07:11.246780 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0216 11:07:11.246785 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0216 11:07:11.257100 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0216 11:07:11.257140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0216 11:07:11.257148 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0216 11:07:11.257148 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0216 11:07:11.257158 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0216 11:07:11.257195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0216 11:07:11.257203 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0216 11:07:11.257208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0216 11:07:11.259242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.529075 4949 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2027-02-16 11:02:12 +0000 UTC, rotation deadline is 2026-10-30 22:11:29.412520992 +0000 UTC Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.529133 4949 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 6155h4m15.883390866s for next certificate rotation Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.538469 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.542259 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7q7gk\" (UniqueName: \"kubernetes.io/projected/3e42a398-f83a-4463-9ab7-4e093e80d744-kube-api-access-7q7gk\") pod \"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.542300 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n7ksw\" (UniqueName: \"kubernetes.io/projected/6ff1c788-4b66-48e9-8178-006f231d264c-kube-api-access-n7ksw\") pod \"multus-additional-cni-plugins-vjrxd\" (UID: \"6ff1c788-4b66-48e9-8178-006f231d264c\") " pod="openshift-multus/multus-additional-cni-plugins-vjrxd" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.542321 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-run-openvswitch\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 
11:07:13.542340 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/6ff1c788-4b66-48e9-8178-006f231d264c-cni-binary-copy\") pod \"multus-additional-cni-plugins-vjrxd\" (UID: \"6ff1c788-4b66-48e9-8178-006f231d264c\") " pod="openshift-multus/multus-additional-cni-plugins-vjrxd" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.542357 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.542373 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-host-cni-bin\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.542390 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/6ff1c788-4b66-48e9-8178-006f231d264c-system-cni-dir\") pod \"multus-additional-cni-plugins-vjrxd\" (UID: \"6ff1c788-4b66-48e9-8178-006f231d264c\") " pod="openshift-multus/multus-additional-cni-plugins-vjrxd" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.542406 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/6ff1c788-4b66-48e9-8178-006f231d264c-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-vjrxd\" (UID: \"6ff1c788-4b66-48e9-8178-006f231d264c\") " pod="openshift-multus/multus-additional-cni-plugins-vjrxd" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.542419 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/6ff1c788-4b66-48e9-8178-006f231d264c-os-release\") pod \"multus-additional-cni-plugins-vjrxd\" (UID: \"6ff1c788-4b66-48e9-8178-006f231d264c\") " pod="openshift-multus/multus-additional-cni-plugins-vjrxd" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.542434 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/3e42a398-f83a-4463-9ab7-4e093e80d744-cnibin\") pod \"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.542446 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/3e42a398-f83a-4463-9ab7-4e093e80d744-os-release\") pod \"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.542462 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/3e42a398-f83a-4463-9ab7-4e093e80d744-cni-binary-copy\") pod 
\"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.542477 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/3e42a398-f83a-4463-9ab7-4e093e80d744-host-run-k8s-cni-cncf-io\") pod \"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.542491 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-etc-openvswitch\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.542506 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-ovnkube-script-lib\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.542521 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3e42a398-f83a-4463-9ab7-4e093e80d744-host-var-lib-cni-bin\") pod \"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.542536 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/3e42a398-f83a-4463-9ab7-4e093e80d744-multus-conf-dir\") pod \"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.542553 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/3e42a398-f83a-4463-9ab7-4e093e80d744-host-run-multus-certs\") pod \"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.542568 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-systemd-units\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.542583 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-host-slash\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.542597 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: 
\"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-var-lib-openvswitch\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.542618 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/3e42a398-f83a-4463-9ab7-4e093e80d744-multus-cni-dir\") pod \"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.542641 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/3e42a398-f83a-4463-9ab7-4e093e80d744-multus-socket-dir-parent\") pod \"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.542656 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-host-run-netns\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.542670 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/39ca5ab7-457c-4404-a3eb-f6acce74843b-rootfs\") pod \"machine-config-daemon-26lss\" (UID: \"39ca5ab7-457c-4404-a3eb-f6acce74843b\") " pod="openshift-machine-config-operator/machine-config-daemon-26lss" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.542683 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/3e42a398-f83a-4463-9ab7-4e093e80d744-host-var-lib-cni-multus\") pod \"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.542697 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-host-kubelet\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.542715 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-ovn-node-metrics-cert\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.542733 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wpzms\" (UniqueName: \"kubernetes.io/projected/39ca5ab7-457c-4404-a3eb-f6acce74843b-kube-api-access-wpzms\") pod \"machine-config-daemon-26lss\" (UID: \"39ca5ab7-457c-4404-a3eb-f6acce74843b\") " pod="openshift-machine-config-operator/machine-config-daemon-26lss" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.542755 4949 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-node-log\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.542784 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qzd7d\" (UniqueName: \"kubernetes.io/projected/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-kube-api-access-qzd7d\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.542803 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/39ca5ab7-457c-4404-a3eb-f6acce74843b-proxy-tls\") pod \"machine-config-daemon-26lss\" (UID: \"39ca5ab7-457c-4404-a3eb-f6acce74843b\") " pod="openshift-machine-config-operator/machine-config-daemon-26lss" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.542828 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/3e42a398-f83a-4463-9ab7-4e093e80d744-system-cni-dir\") pod \"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.542849 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3e42a398-f83a-4463-9ab7-4e093e80d744-host-run-netns\") pod \"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.542869 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/3e42a398-f83a-4463-9ab7-4e093e80d744-hostroot\") pod \"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.542889 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-host-cni-netd\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.542909 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-env-overrides\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.542937 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-run-systemd\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 
11:07:13.542963 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/6ff1c788-4b66-48e9-8178-006f231d264c-cnibin\") pod \"multus-additional-cni-plugins-vjrxd\" (UID: \"6ff1c788-4b66-48e9-8178-006f231d264c\") " pod="openshift-multus/multus-additional-cni-plugins-vjrxd" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.542988 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/3e42a398-f83a-4463-9ab7-4e093e80d744-multus-daemon-config\") pod \"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.543006 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-ovnkube-config\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.543090 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/3e42a398-f83a-4463-9ab7-4e093e80d744-host-var-lib-kubelet\") pod \"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.543122 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3e42a398-f83a-4463-9ab7-4e093e80d744-etc-kubernetes\") pod \"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.543226 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/6ff1c788-4b66-48e9-8178-006f231d264c-tuning-conf-dir\") pod \"multus-additional-cni-plugins-vjrxd\" (UID: \"6ff1c788-4b66-48e9-8178-006f231d264c\") " pod="openshift-multus/multus-additional-cni-plugins-vjrxd" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.543249 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-run-ovn\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.543269 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-log-socket\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.543286 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-host-run-ovn-kubernetes\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.543304 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/39ca5ab7-457c-4404-a3eb-f6acce74843b-mcd-auth-proxy-config\") pod \"machine-config-daemon-26lss\" (UID: \"39ca5ab7-457c-4404-a3eb-f6acce74843b\") " pod="openshift-machine-config-operator/machine-config-daemon-26lss" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.548559 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.558506 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.567349 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.570971 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.579541 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kxn9z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc3f701c-2094-4818-871c-547fc5636a55\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rld7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kxn9z\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.582076 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.590725 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jsmls" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e42a398-f83a-4463-9ab7-4e093e80d744\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\
"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7q7gk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jsmls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.591683 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rld7r\" (UniqueName: \"kubernetes.io/projected/cc3f701c-2094-4818-871c-547fc5636a55-kube-api-access-rld7r\") pod \"node-resolver-kxn9z\" (UID: \"cc3f701c-2094-4818-871c-547fc5636a55\") " pod="openshift-dns/node-resolver-kxn9z" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.602792 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.615118 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.633045 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gfr2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.644130 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7q7gk\" (UniqueName: \"kubernetes.io/projected/3e42a398-f83a-4463-9ab7-4e093e80d744-kube-api-access-7q7gk\") pod \"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.644197 4949 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-n7ksw\" (UniqueName: \"kubernetes.io/projected/6ff1c788-4b66-48e9-8178-006f231d264c-kube-api-access-n7ksw\") pod \"multus-additional-cni-plugins-vjrxd\" (UID: \"6ff1c788-4b66-48e9-8178-006f231d264c\") " pod="openshift-multus/multus-additional-cni-plugins-vjrxd" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.644232 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/6ff1c788-4b66-48e9-8178-006f231d264c-cni-binary-copy\") pod \"multus-additional-cni-plugins-vjrxd\" (UID: \"6ff1c788-4b66-48e9-8178-006f231d264c\") " pod="openshift-multus/multus-additional-cni-plugins-vjrxd" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.644272 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-run-openvswitch\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.644293 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.644315 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-host-cni-bin\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.644352 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/6ff1c788-4b66-48e9-8178-006f231d264c-system-cni-dir\") pod \"multus-additional-cni-plugins-vjrxd\" (UID: \"6ff1c788-4b66-48e9-8178-006f231d264c\") " pod="openshift-multus/multus-additional-cni-plugins-vjrxd" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.644371 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/6ff1c788-4b66-48e9-8178-006f231d264c-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-vjrxd\" (UID: \"6ff1c788-4b66-48e9-8178-006f231d264c\") " pod="openshift-multus/multus-additional-cni-plugins-vjrxd" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.644390 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/6ff1c788-4b66-48e9-8178-006f231d264c-os-release\") pod \"multus-additional-cni-plugins-vjrxd\" (UID: \"6ff1c788-4b66-48e9-8178-006f231d264c\") " pod="openshift-multus/multus-additional-cni-plugins-vjrxd" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.644435 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/3e42a398-f83a-4463-9ab7-4e093e80d744-os-release\") pod \"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc 
kubenswrapper[4949]: I0216 11:07:13.644455 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/3e42a398-f83a-4463-9ab7-4e093e80d744-cni-binary-copy\") pod \"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.644473 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/3e42a398-f83a-4463-9ab7-4e093e80d744-host-run-k8s-cni-cncf-io\") pod \"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.644511 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/3e42a398-f83a-4463-9ab7-4e093e80d744-cnibin\") pod \"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.644513 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-host-cni-bin\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.644531 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-systemd-units\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.644553 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-host-slash\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.644597 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-var-lib-openvswitch\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.644622 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-etc-openvswitch\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.644642 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-ovnkube-script-lib\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.644686 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" 
(UniqueName: \"kubernetes.io/host-path/3e42a398-f83a-4463-9ab7-4e093e80d744-host-var-lib-cni-bin\") pod \"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.644708 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/3e42a398-f83a-4463-9ab7-4e093e80d744-multus-conf-dir\") pod \"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.644753 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/3e42a398-f83a-4463-9ab7-4e093e80d744-host-run-multus-certs\") pod \"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.644796 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/3e42a398-f83a-4463-9ab7-4e093e80d744-multus-cni-dir\") pod \"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.644839 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/3e42a398-f83a-4463-9ab7-4e093e80d744-multus-socket-dir-parent\") pod \"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.644860 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-host-run-netns\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.644880 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/39ca5ab7-457c-4404-a3eb-f6acce74843b-rootfs\") pod \"machine-config-daemon-26lss\" (UID: \"39ca5ab7-457c-4404-a3eb-f6acce74843b\") " pod="openshift-machine-config-operator/machine-config-daemon-26lss" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.644921 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-ovn-node-metrics-cert\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.644946 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wpzms\" (UniqueName: \"kubernetes.io/projected/39ca5ab7-457c-4404-a3eb-f6acce74843b-kube-api-access-wpzms\") pod \"machine-config-daemon-26lss\" (UID: \"39ca5ab7-457c-4404-a3eb-f6acce74843b\") " pod="openshift-machine-config-operator/machine-config-daemon-26lss" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.644987 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: 
\"kubernetes.io/host-path/3e42a398-f83a-4463-9ab7-4e093e80d744-host-var-lib-cni-multus\") pod \"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.645008 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-host-kubelet\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.645028 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/39ca5ab7-457c-4404-a3eb-f6acce74843b-proxy-tls\") pod \"machine-config-daemon-26lss\" (UID: \"39ca5ab7-457c-4404-a3eb-f6acce74843b\") " pod="openshift-machine-config-operator/machine-config-daemon-26lss" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.645064 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-node-log\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.645085 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qzd7d\" (UniqueName: \"kubernetes.io/projected/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-kube-api-access-qzd7d\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.645106 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/3e42a398-f83a-4463-9ab7-4e093e80d744-hostroot\") pod \"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.645139 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-host-cni-netd\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.645158 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-env-overrides\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.645203 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/3e42a398-f83a-4463-9ab7-4e093e80d744-system-cni-dir\") pod \"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.645224 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3e42a398-f83a-4463-9ab7-4e093e80d744-host-run-netns\") pod \"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " 
pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.645244 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/6ff1c788-4b66-48e9-8178-006f231d264c-cnibin\") pod \"multus-additional-cni-plugins-vjrxd\" (UID: \"6ff1c788-4b66-48e9-8178-006f231d264c\") " pod="openshift-multus/multus-additional-cni-plugins-vjrxd" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.645292 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-run-systemd\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.645310 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-ovnkube-config\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.645329 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/3e42a398-f83a-4463-9ab7-4e093e80d744-multus-daemon-config\") pod \"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.645352 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/6ff1c788-4b66-48e9-8178-006f231d264c-cni-binary-copy\") pod \"multus-additional-cni-plugins-vjrxd\" (UID: \"6ff1c788-4b66-48e9-8178-006f231d264c\") " pod="openshift-multus/multus-additional-cni-plugins-vjrxd" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.645366 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/6ff1c788-4b66-48e9-8178-006f231d264c-tuning-conf-dir\") pod \"multus-additional-cni-plugins-vjrxd\" (UID: \"6ff1c788-4b66-48e9-8178-006f231d264c\") " pod="openshift-multus/multus-additional-cni-plugins-vjrxd" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.645398 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/3e42a398-f83a-4463-9ab7-4e093e80d744-host-var-lib-kubelet\") pod \"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.645423 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3e42a398-f83a-4463-9ab7-4e093e80d744-etc-kubernetes\") pod \"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.645444 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/39ca5ab7-457c-4404-a3eb-f6acce74843b-mcd-auth-proxy-config\") pod \"machine-config-daemon-26lss\" (UID: \"39ca5ab7-457c-4404-a3eb-f6acce74843b\") " pod="openshift-machine-config-operator/machine-config-daemon-26lss" Feb 
16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.645466 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-run-ovn\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.645486 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-log-socket\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.645508 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-host-run-ovn-kubernetes\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.645572 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-host-run-ovn-kubernetes\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.645602 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-run-openvswitch\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.645633 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.645670 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/3e42a398-f83a-4463-9ab7-4e093e80d744-host-var-lib-kubelet\") pod \"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.645697 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3e42a398-f83a-4463-9ab7-4e093e80d744-etc-kubernetes\") pod \"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.646244 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/39ca5ab7-457c-4404-a3eb-f6acce74843b-mcd-auth-proxy-config\") pod \"machine-config-daemon-26lss\" (UID: \"39ca5ab7-457c-4404-a3eb-f6acce74843b\") " pod="openshift-machine-config-operator/machine-config-daemon-26lss" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.646292 4949 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-run-ovn\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.646297 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.646329 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/6ff1c788-4b66-48e9-8178-006f231d264c-tuning-conf-dir\") pod \"multus-additional-cni-plugins-vjrxd\" (UID: \"6ff1c788-4b66-48e9-8178-006f231d264c\") " pod="openshift-multus/multus-additional-cni-plugins-vjrxd" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.646343 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-host-run-netns\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.646320 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: 
\"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-log-socket\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.646375 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/6ff1c788-4b66-48e9-8178-006f231d264c-system-cni-dir\") pod \"multus-additional-cni-plugins-vjrxd\" (UID: \"6ff1c788-4b66-48e9-8178-006f231d264c\") " pod="openshift-multus/multus-additional-cni-plugins-vjrxd" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.646422 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/3e42a398-f83a-4463-9ab7-4e093e80d744-hostroot\") pod \"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.646434 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/3e42a398-f83a-4463-9ab7-4e093e80d744-host-var-lib-cni-multus\") pod \"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.646449 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-host-kubelet\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.646482 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/39ca5ab7-457c-4404-a3eb-f6acce74843b-rootfs\") pod \"machine-config-daemon-26lss\" (UID: \"39ca5ab7-457c-4404-a3eb-f6acce74843b\") " pod="openshift-machine-config-operator/machine-config-daemon-26lss" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.646815 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/6ff1c788-4b66-48e9-8178-006f231d264c-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-vjrxd\" (UID: \"6ff1c788-4b66-48e9-8178-006f231d264c\") " pod="openshift-multus/multus-additional-cni-plugins-vjrxd" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.647090 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/3e42a398-f83a-4463-9ab7-4e093e80d744-os-release\") pod \"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.647683 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/3e42a398-f83a-4463-9ab7-4e093e80d744-cni-binary-copy\") pod \"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.647728 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/3e42a398-f83a-4463-9ab7-4e093e80d744-host-run-k8s-cni-cncf-io\") pod \"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " 
pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.647765 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/3e42a398-f83a-4463-9ab7-4e093e80d744-cnibin\") pod \"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.647795 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-systemd-units\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.647827 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-host-slash\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.647855 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-var-lib-openvswitch\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.647887 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-etc-openvswitch\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.647924 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/3e42a398-f83a-4463-9ab7-4e093e80d744-host-run-multus-certs\") pod \"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.647936 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/3e42a398-f83a-4463-9ab7-4e093e80d744-multus-conf-dir\") pod \"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.647976 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/3e42a398-f83a-4463-9ab7-4e093e80d744-multus-cni-dir\") pod \"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.648027 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/3e42a398-f83a-4463-9ab7-4e093e80d744-multus-socket-dir-parent\") pod \"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.648067 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: 
\"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-node-log\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.648103 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3e42a398-f83a-4463-9ab7-4e093e80d744-host-run-netns\") pod \"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.648131 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-host-cni-netd\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.648239 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/6ff1c788-4b66-48e9-8178-006f231d264c-cnibin\") pod \"multus-additional-cni-plugins-vjrxd\" (UID: \"6ff1c788-4b66-48e9-8178-006f231d264c\") " pod="openshift-multus/multus-additional-cni-plugins-vjrxd" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.648280 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-run-systemd\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.648601 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-ovnkube-script-lib\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.648644 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3e42a398-f83a-4463-9ab7-4e093e80d744-host-var-lib-cni-bin\") pod \"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.648690 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/3e42a398-f83a-4463-9ab7-4e093e80d744-system-cni-dir\") pod \"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.648754 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-env-overrides\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.648894 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/3e42a398-f83a-4463-9ab7-4e093e80d744-multus-daemon-config\") pod \"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 
crc kubenswrapper[4949]: I0216 11:07:13.649206 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-ovnkube-config\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.649263 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/6ff1c788-4b66-48e9-8178-006f231d264c-os-release\") pod \"multus-additional-cni-plugins-vjrxd\" (UID: \"6ff1c788-4b66-48e9-8178-006f231d264c\") " pod="openshift-multus/multus-additional-cni-plugins-vjrxd" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.650816 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-ovn-node-metrics-cert\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.651490 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/39ca5ab7-457c-4404-a3eb-f6acce74843b-proxy-tls\") pod \"machine-config-daemon-26lss\" (UID: \"39ca5ab7-457c-4404-a3eb-f6acce74843b\") " pod="openshift-machine-config-operator/machine-config-daemon-26lss" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.660473 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7q7gk\" (UniqueName: \"kubernetes.io/projected/3e42a398-f83a-4463-9ab7-4e093e80d744-kube-api-access-7q7gk\") pod \"multus-jsmls\" (UID: \"3e42a398-f83a-4463-9ab7-4e093e80d744\") " pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.661519 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.661790 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n7ksw\" (UniqueName: \"kubernetes.io/projected/6ff1c788-4b66-48e9-8178-006f231d264c-kube-api-access-n7ksw\") pod \"multus-additional-cni-plugins-vjrxd\" (UID: \"6ff1c788-4b66-48e9-8178-006f231d264c\") " pod="openshift-multus/multus-additional-cni-plugins-vjrxd" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.669922 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wpzms\" (UniqueName: \"kubernetes.io/projected/39ca5ab7-457c-4404-a3eb-f6acce74843b-kube-api-access-wpzms\") pod \"machine-config-daemon-26lss\" (UID: \"39ca5ab7-457c-4404-a3eb-f6acce74843b\") " pod="openshift-machine-config-operator/machine-config-daemon-26lss" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.671384 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qzd7d\" (UniqueName: \"kubernetes.io/projected/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-kube-api-access-qzd7d\") pod \"ovnkube-node-gfr2q\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.676406 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"39ca5ab7-457c-4404-a3eb-f6acce74843b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-26lss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.690805 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ff1c788-4b66-48e9-8178-006f231d264c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with 
unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceac
count\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vjrxd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:13Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.707368 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aada2690-8d5f-4854-bc83-59906010e8ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-16T11:07:11Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0216 11:07:05.819935 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0216 11:07:05.820709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2063297113/tls.crt::/tmp/serving-cert-2063297113/tls.key\\\\\\\"\\\\nI0216 11:07:11.242838 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0216 11:07:11.246739 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0216 11:07:11.246759 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0216 11:07:11.246780 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0216 11:07:11.246785 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0216 11:07:11.257100 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0216 11:07:11.257140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0216 11:07:11.257148 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0216 11:07:11.257148 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0216 11:07:11.257158 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0216 11:07:11.257195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0216 11:07:11.257203 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0216 11:07:11.257208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0216 11:07:11.259242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:13Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.723186 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:13Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.731509 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"317e85af8628e3ae9d104c6faacfd4ce6b66adc7d3f0e426433f60118fffa894"} Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.731559 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"43a9246b1c43b2bfb063c1d8aded507f620472bb98ffc99e91115e1189478807"} Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.732949 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"1997fe9cefd6bfcd634b77dad76d5d7c814318127f2be93b489d7866808c57ba"} Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.741262 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:13Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.744392 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-jsmls" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.752240 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kxn9z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc3f701c-2094-4818-871c-547fc5636a55\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rld7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kxn9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:13Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.756341 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.763889 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.771126 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-26lss" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.773968 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jsmls" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e42a398-f83a-4463-9ab7-4e093e80d744\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7q7gk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}]
,\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jsmls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:13Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.784446 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kxn9z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc3f701c-2094-4818-871c-547fc5636a55\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rld7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kxn9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:13Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.800887 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jsmls" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e42a398-f83a-4463-9ab7-4e093e80d744\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7q7gk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jsmls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:13Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.816211 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aada2690-8d5f-4854-bc83-59906010e8ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserve
r-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-16T11:07:11Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0216 11:07:05.819935 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0216 11:07:05.820709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2063297113/tls.crt::/tmp/serving-cert-2063297113/tls.key\\\\\\\"\\\\nI0216 11:07:11.242838 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0216 11:07:11.246739 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0216 11:07:11.246759 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0216 11:07:11.246780 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0216 11:07:11.246785 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0216 11:07:11.257100 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0216 11:07:11.257140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0216 11:07:11.257148 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0216 11:07:11.257148 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0216 11:07:11.257158 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0216 11:07:11.257195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0216 11:07:11.257203 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0216 11:07:11.257208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0216 11:07:11.259242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s 
restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:13Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:13 crc kubenswrapper[4949]: W0216 11:07:13.821513 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3e42a398_f83a_4463_9ab7_4e093e80d744.slice/crio-c1e760cf4811dff2750dd14860c41252ae2e7978a06866af31f0dbaacaf98d37 WatchSource:0}: Error finding container c1e760cf4811dff2750dd14860c41252ae2e7978a06866af31f0dbaacaf98d37: Status 404 returned error can't find the container with id c1e760cf4811dff2750dd14860c41252ae2e7978a06866af31f0dbaacaf98d37 Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.821960 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-kxn9z" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.833668 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1997fe9cefd6bfcd634b77dad76d5d7c814318127f2be93b489d7866808c57ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:13Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.846901 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.847043 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.847068 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: 
\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:07:13 crc kubenswrapper[4949]: E0216 11:07:13.847091 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:07:15.847067417 +0000 UTC m=+25.476401582 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:07:13 crc kubenswrapper[4949]: E0216 11:07:13.847130 4949 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.847131 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:07:13 crc kubenswrapper[4949]: E0216 11:07:13.847192 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-16 11:07:15.847164179 +0000 UTC m=+25.476498344 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.847209 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:07:13 crc kubenswrapper[4949]: E0216 11:07:13.847290 4949 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 16 11:07:13 crc kubenswrapper[4949]: E0216 11:07:13.847305 4949 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 16 11:07:13 crc kubenswrapper[4949]: E0216 11:07:13.847315 4949 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 16 11:07:13 crc kubenswrapper[4949]: E0216 11:07:13.847352 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-02-16 11:07:15.847340293 +0000 UTC m=+25.476674458 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 16 11:07:13 crc kubenswrapper[4949]: E0216 11:07:13.847880 4949 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 16 11:07:13 crc kubenswrapper[4949]: E0216 11:07:13.847913 4949 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 16 11:07:13 crc kubenswrapper[4949]: E0216 11:07:13.847923 4949 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.847907 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:13Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:13 crc kubenswrapper[4949]: E0216 11:07:13.847952 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-02-16 11:07:15.847939867 +0000 UTC m=+25.477274022 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 16 11:07:13 crc kubenswrapper[4949]: E0216 11:07:13.848029 4949 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 16 11:07:13 crc kubenswrapper[4949]: E0216 11:07:13.848140 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-16 11:07:15.848121011 +0000 UTC m=+25.477455246 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.859247 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://317e85af8628e3ae9d104c6faacfd4ce6b66adc7d3f0e426433f60118fffa894\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://43a9246b1c43b2bfb063c1d8aded507f620472bb98ffc99e91115e1189478807\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2026-02-16T11:07:13Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.869788 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:13Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.888484 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with incomplete status: 
[kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7732574532
65a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod 
\"openshift-ovn-kubernetes\"/\"ovnkube-node-gfr2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:13Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.902669 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:13Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.914421 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:13Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.925980 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"39ca5ab7-457c-4404-a3eb-f6acce74843b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-26lss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:13Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:13 crc kubenswrapper[4949]: I0216 11:07:13.943762 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ff1c788-4b66-48e9-8178-006f231d264c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"
/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vjrxd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:13Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:14 crc kubenswrapper[4949]: W0216 11:07:14.048223 4949 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3f545ae8_1b14_4abd_b4ea_844f6ae7b54d.slice/crio-a2473bb4251cdf5da2ce75e2bdbb3fdb2cda10e15b5e4becefdd76b7c7f4cf34 WatchSource:0}: Error finding container a2473bb4251cdf5da2ce75e2bdbb3fdb2cda10e15b5e4becefdd76b7c7f4cf34: Status 404 returned error can't find the container with id a2473bb4251cdf5da2ce75e2bdbb3fdb2cda10e15b5e4becefdd76b7c7f4cf34 Feb 16 11:07:14 crc kubenswrapper[4949]: I0216 11:07:14.196188 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-28 12:55:35.123803481 +0000 UTC Feb 16 11:07:14 crc kubenswrapper[4949]: I0216 11:07:14.234573 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:07:14 crc kubenswrapper[4949]: I0216 11:07:14.234619 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:07:14 crc kubenswrapper[4949]: I0216 11:07:14.234667 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:07:14 crc kubenswrapper[4949]: E0216 11:07:14.234775 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:07:14 crc kubenswrapper[4949]: E0216 11:07:14.234849 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:07:14 crc kubenswrapper[4949]: E0216 11:07:14.234903 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:07:14 crc kubenswrapper[4949]: I0216 11:07:14.738226 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-kxn9z" event={"ID":"cc3f701c-2094-4818-871c-547fc5636a55","Type":"ContainerStarted","Data":"993e593b3f31361b71cff339c39e1bc66b6fbbeb8a1d125976254ec852dcd919"} Feb 16 11:07:14 crc kubenswrapper[4949]: I0216 11:07:14.738308 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-kxn9z" event={"ID":"cc3f701c-2094-4818-871c-547fc5636a55","Type":"ContainerStarted","Data":"766a490308d055cf94d50bc1cf5cc018b7ec1a6a9640dbc3483c51de720e019d"} Feb 16 11:07:14 crc kubenswrapper[4949]: I0216 11:07:14.739918 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-jsmls" event={"ID":"3e42a398-f83a-4463-9ab7-4e093e80d744","Type":"ContainerStarted","Data":"9e3e0579c66283751490a28d6f93146aa4b7a000219c17747fe2041a778fe2ff"} Feb 16 11:07:14 crc kubenswrapper[4949]: I0216 11:07:14.739962 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-jsmls" event={"ID":"3e42a398-f83a-4463-9ab7-4e093e80d744","Type":"ContainerStarted","Data":"c1e760cf4811dff2750dd14860c41252ae2e7978a06866af31f0dbaacaf98d37"} Feb 16 11:07:14 crc kubenswrapper[4949]: I0216 11:07:14.741976 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" event={"ID":"6ff1c788-4b66-48e9-8178-006f231d264c","Type":"ContainerStarted","Data":"a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce"} Feb 16 11:07:14 crc kubenswrapper[4949]: I0216 11:07:14.742034 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" event={"ID":"6ff1c788-4b66-48e9-8178-006f231d264c","Type":"ContainerStarted","Data":"7c6604ba95bd87b2aed176c8258ec4bb864a6a7c8955c171f8ab929d1ec60e6f"} Feb 16 11:07:14 crc kubenswrapper[4949]: I0216 11:07:14.744408 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" event={"ID":"39ca5ab7-457c-4404-a3eb-f6acce74843b","Type":"ContainerStarted","Data":"4d3e706e8ad75d6c7419dc41073b0842faf175f1e153735a12e3ee7243676253"} Feb 16 11:07:14 crc kubenswrapper[4949]: I0216 11:07:14.744487 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" event={"ID":"39ca5ab7-457c-4404-a3eb-f6acce74843b","Type":"ContainerStarted","Data":"4978ec698681f01249736b54aa5a1fba37fc94f3667b735c99bdfb97d3bd3380"} Feb 16 11:07:14 crc kubenswrapper[4949]: I0216 11:07:14.744503 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" event={"ID":"39ca5ab7-457c-4404-a3eb-f6acce74843b","Type":"ContainerStarted","Data":"ec0a1e335d3a284c70926c38f616949dba0dc8cec29b167de302c089eff02aa6"} Feb 16 11:07:14 crc kubenswrapper[4949]: I0216 11:07:14.746038 4949 generic.go:334] "Generic (PLEG): container finished" podID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerID="5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a" exitCode=0 Feb 16 11:07:14 crc kubenswrapper[4949]: I0216 11:07:14.746184 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" 
event={"ID":"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d","Type":"ContainerDied","Data":"5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a"} Feb 16 11:07:14 crc kubenswrapper[4949]: I0216 11:07:14.746219 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" event={"ID":"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d","Type":"ContainerStarted","Data":"a2473bb4251cdf5da2ce75e2bdbb3fdb2cda10e15b5e4becefdd76b7c7f4cf34"} Feb 16 11:07:14 crc kubenswrapper[4949]: I0216 11:07:14.756877 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ff1c788-4b66-48e9-8178-006f231d264c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vjrxd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:14Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:14 crc kubenswrapper[4949]: I0216 11:07:14.771225 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aada2690-8d5f-4854-bc83-59906010e8ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-16T11:07:11Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0216 11:07:05.819935 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0216 11:07:05.820709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2063297113/tls.crt::/tmp/serving-cert-2063297113/tls.key\\\\\\\"\\\\nI0216 11:07:11.242838 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0216 11:07:11.246739 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0216 11:07:11.246759 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0216 11:07:11.246780 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0216 11:07:11.246785 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0216 11:07:11.257100 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0216 11:07:11.257140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0216 11:07:11.257148 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0216 11:07:11.257148 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0216 11:07:11.257158 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0216 11:07:11.257195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0216 11:07:11.257203 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0216 11:07:11.257208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0216 11:07:11.259242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:14Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:14 crc kubenswrapper[4949]: I0216 11:07:14.796775 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1997fe9cefd6bfcd634b77dad76d5d7c814318127f2be93b489d7866808c57ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:14Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:14 crc kubenswrapper[4949]: I0216 11:07:14.812317 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:14Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:14 crc kubenswrapper[4949]: I0216 11:07:14.839142 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kxn9z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc3f701c-2094-4818-871c-547fc5636a55\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://993e593b3f31361b71cff339c39e1bc66b6fbbeb8a1d125976254ec852dcd919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rld7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kxn9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:14Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:14 crc kubenswrapper[4949]: I0216 11:07:14.855154 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jsmls" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e42a398-f83a-4463-9ab7-4e093e80d744\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7q7gk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabl
ed\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jsmls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:14Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:14 crc kubenswrapper[4949]: I0216 11:07:14.874141 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://317e85af8628e3ae9d104c6faacfd4ce6b66adc7d3f0e426433f60118fffa894\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://43a9246b1c43b2bfb063c1d8aded507f620472bb98ffc99e91115e1189478807\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:14Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:14 crc kubenswrapper[4949]: I0216 11:07:14.890577 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:14Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:14 crc kubenswrapper[4949]: I0216 11:07:14.922789 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gfr2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:14Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:14 crc kubenswrapper[4949]: I0216 11:07:14.939277 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:14Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:14 crc kubenswrapper[4949]: I0216 11:07:14.948421 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"39ca5ab7-457c-4404-a3eb-f6acce74843b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-26lss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:14Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:14 crc kubenswrapper[4949]: I0216 11:07:14.965109 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:14Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:14 crc kubenswrapper[4949]: I0216 11:07:14.977688 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:14Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:14 crc kubenswrapper[4949]: I0216 11:07:14.991419 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:14Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:15 crc kubenswrapper[4949]: I0216 11:07:15.002860 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"39ca5ab7-457c-4404-a3eb-f6acce74843b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d3e706e8ad75d6c7419dc41073b0842faf175f1e153735a12e3ee7243676253\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4978ec698681f01249736b54aa5a1fba37fc94f3667b735c99bdfb97d3bd3380\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-26lss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:15Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:15 crc kubenswrapper[4949]: I0216 11:07:15.018114 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ff1c788-4b66-48e9-8178-006f231d264c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"}
,{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vjrxd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:15Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:15 crc kubenswrapper[4949]: I0216 11:07:15.031786 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kxn9z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc3f701c-2094-4818-871c-547fc5636a55\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://993e593b3f31361b71cff339c39e1bc66b6fbbeb8a1d125976254ec852dcd919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rld7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kxn9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:15Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:15 crc kubenswrapper[4949]: I0216 11:07:15.045463 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jsmls" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e42a398-f83a-4463-9ab7-4e093e80d744\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3e0579c66283751490a28d6f93146aa4b7a000219c17747fe2041a778fe2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7q7gk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jsmls\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:15Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:15 crc kubenswrapper[4949]: I0216 11:07:15.065648 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aada2690-8d5f-4854-bc83-59906010e8ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-16T11:07:11Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0216 11:07:05.819935 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0216 11:07:05.820709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2063297113/tls.crt::/tmp/serving-cert-2063297113/tls.key\\\\\\\"\\\\nI0216 11:07:11.242838 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0216 11:07:11.246739 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0216 11:07:11.246759 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0216 11:07:11.246780 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0216 11:07:11.246785 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0216 11:07:11.257100 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0216 11:07:11.257140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0216 11:07:11.257148 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0216 11:07:11.257148 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0216 11:07:11.257158 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0216 11:07:11.257195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0216 11:07:11.257203 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0216 11:07:11.257208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0216 11:07:11.259242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:15Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:15 crc kubenswrapper[4949]: I0216 11:07:15.111224 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1997fe9cefd6bfcd634b77dad76d5d7c814318127f2be93b489d7866808c57ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:15Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:15 crc kubenswrapper[4949]: I0216 11:07:15.148843 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:15Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:15 crc kubenswrapper[4949]: I0216 11:07:15.164290 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://317e85af8628e3ae9d104c6faacfd4ce6b66adc7d3f0e426433f60118fffa894\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://43a9246b1c43b2bfb063c1d8aded507f620472bb98ffc99e91115e1189478807\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\
\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:15Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:15 crc kubenswrapper[4949]: I0216 11:07:15.178070 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:15Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:15 crc kubenswrapper[4949]: I0216 11:07:15.201237 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-01 23:34:46.104609701 +0000 UTC Feb 16 11:07:15 crc kubenswrapper[4949]: I0216 11:07:15.202115 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gfr2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:15Z 
is after 2025-08-24T17:21:41Z" Feb 16 11:07:15 crc kubenswrapper[4949]: I0216 11:07:15.750559 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"eff651eda112458195ec8e6bc6ecdf362f44a44f7f8be136f631153251f278c2"} Feb 16 11:07:15 crc kubenswrapper[4949]: I0216 11:07:15.751701 4949 generic.go:334] "Generic (PLEG): container finished" podID="6ff1c788-4b66-48e9-8178-006f231d264c" containerID="a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce" exitCode=0 Feb 16 11:07:15 crc kubenswrapper[4949]: I0216 11:07:15.751748 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" event={"ID":"6ff1c788-4b66-48e9-8178-006f231d264c","Type":"ContainerDied","Data":"a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce"} Feb 16 11:07:15 crc kubenswrapper[4949]: I0216 11:07:15.754597 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" event={"ID":"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d","Type":"ContainerStarted","Data":"e903800152f2a61249dd315080aec7aff0fb0f96a2d687f4903ee211ac061080"} Feb 16 11:07:15 crc kubenswrapper[4949]: I0216 11:07:15.754638 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" event={"ID":"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d","Type":"ContainerStarted","Data":"b36100d71aa9c4c492c12006bd25f19aa6f8e8b39048b6e895acbadf3a3d5b5c"} Feb 16 11:07:15 crc kubenswrapper[4949]: I0216 11:07:15.754651 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" event={"ID":"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d","Type":"ContainerStarted","Data":"a6b376f8bceb83918f978008daff9cea37a309102416bb94a209bf286a7fe3b4"} Feb 16 11:07:15 crc kubenswrapper[4949]: I0216 11:07:15.754664 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" event={"ID":"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d","Type":"ContainerStarted","Data":"c16c3ec1c264807a71bba82352d13478aabcbae9eee409569b1de207fd89131c"} Feb 16 11:07:15 crc kubenswrapper[4949]: I0216 11:07:15.754675 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" event={"ID":"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d","Type":"ContainerStarted","Data":"6614345cd2b9b48d07439200086ab97d851abf34d3fea2b29a5073f224184ed9"} Feb 16 11:07:15 crc kubenswrapper[4949]: I0216 11:07:15.754687 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" event={"ID":"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d","Type":"ContainerStarted","Data":"4c30142bc1aa2477b0821702607a40da4bd125f14ce8bdd4d6a555481f4be477"} Feb 16 11:07:15 crc kubenswrapper[4949]: I0216 11:07:15.771072 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aada2690-8d5f-4854-bc83-59906010e8ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-16T11:07:11Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0216 11:07:05.819935 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0216 11:07:05.820709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2063297113/tls.crt::/tmp/serving-cert-2063297113/tls.key\\\\\\\"\\\\nI0216 11:07:11.242838 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0216 11:07:11.246739 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0216 11:07:11.246759 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0216 11:07:11.246780 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0216 11:07:11.246785 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0216 11:07:11.257100 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0216 11:07:11.257140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0216 11:07:11.257148 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0216 11:07:11.257148 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0216 11:07:11.257158 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0216 11:07:11.257195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0216 11:07:11.257203 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0216 11:07:11.257208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0216 11:07:11.259242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:15Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:15 crc kubenswrapper[4949]: I0216 11:07:15.784634 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1997fe9cefd6bfcd634b77dad76d5d7c814318127f2be93b489d7866808c57ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:15Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:15 crc kubenswrapper[4949]: I0216 11:07:15.801560 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:15Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:15 crc kubenswrapper[4949]: I0216 11:07:15.812229 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kxn9z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc3f701c-2094-4818-871c-547fc5636a55\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://993e593b3f31361b71cff339c39e1bc66b6fbbeb8a1d125976254ec852dcd919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rld7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kxn9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:15Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:15 crc kubenswrapper[4949]: I0216 11:07:15.823686 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jsmls" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e42a398-f83a-4463-9ab7-4e093e80d744\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3e0579c66283751490a28d6f93146aa4b7a000219c17747fe2041a778fe2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7q7gk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jsmls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:15Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:15 crc kubenswrapper[4949]: I0216 11:07:15.836612 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://317e85af8628e3ae9d104c6faacfd4ce6b66adc7d3f0e426433f60118fffa894\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://43a9246b1c43b2bfb063c1d8aded507f620472bb98ffc99e91115e1189478807\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:15Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:15 crc kubenswrapper[4949]: I0216 11:07:15.845916 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eff651eda112458195ec8e6bc6ecdf362f44a44f7f8be136f631153251f278c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:15Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:15 crc kubenswrapper[4949]: I0216 11:07:15.867277 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gfr2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:15Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:15 crc kubenswrapper[4949]: I0216 11:07:15.868293 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 16 11:07:15 crc kubenswrapper[4949]: I0216 11:07:15.868451 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:07:15 crc kubenswrapper[4949]: E0216 11:07:15.868488 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:07:19.868471025 +0000 UTC m=+29.497805190 (durationBeforeRetry 4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:07:15 crc kubenswrapper[4949]: I0216 11:07:15.868547 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:07:15 crc kubenswrapper[4949]: E0216 11:07:15.868581 4949 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 16 11:07:15 crc kubenswrapper[4949]: E0216 11:07:15.868603 4949 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 16 11:07:15 crc kubenswrapper[4949]: E0216 11:07:15.868615 4949 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 16 11:07:15 crc kubenswrapper[4949]: E0216 11:07:15.868651 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-02-16 11:07:19.868642589 +0000 UTC m=+29.497976754 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 16 11:07:15 crc kubenswrapper[4949]: I0216 11:07:15.868666 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:07:15 crc kubenswrapper[4949]: I0216 11:07:15.868689 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:07:15 crc kubenswrapper[4949]: E0216 11:07:15.868779 4949 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Feb 16 11:07:15 crc kubenswrapper[4949]: E0216 11:07:15.868813 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-16 11:07:19.868803692 +0000 UTC m=+29.498137857 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Feb 16 11:07:15 crc kubenswrapper[4949]: E0216 11:07:15.869378 4949 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 16 11:07:15 crc kubenswrapper[4949]: E0216 11:07:15.869449 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-16 11:07:19.869429117 +0000 UTC m=+29.498763292 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 16 11:07:15 crc kubenswrapper[4949]: E0216 11:07:15.869709 4949 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 16 11:07:15 crc kubenswrapper[4949]: E0216 11:07:15.869728 4949 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 16 11:07:15 crc kubenswrapper[4949]: E0216 11:07:15.869736 4949 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 16 11:07:15 crc kubenswrapper[4949]: E0216 11:07:15.869770 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-02-16 11:07:19.869761025 +0000 UTC m=+29.499095180 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 16 11:07:15 crc kubenswrapper[4949]: I0216 11:07:15.880058 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:15Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:15 crc kubenswrapper[4949]: I0216 11:07:15.893653 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"39ca5ab7-457c-4404-a3eb-f6acce74843b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d3e706e8ad75d6c7419dc41073b0842faf175f1e153735a12e3ee7243676253\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4978ec698681f01249736b54aa5a1fba37fc94f3667b735c99bdfb97d3bd3380\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-26lss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:15Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:15 crc kubenswrapper[4949]: I0216 11:07:15.908938 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:15Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:15 crc kubenswrapper[4949]: I0216 11:07:15.922319 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ff1c788-4b66-48e9-8178-006f231d264c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"}
,{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vjrxd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:15Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:15 crc kubenswrapper[4949]: I0216 11:07:15.935031 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ff1c788-4b66-48e9-8178-006f231d264c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c85
7df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay
.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vjrxd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:15Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:15 crc kubenswrapper[4949]: I0216 11:07:15.947491 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1997fe9cefd6bfcd634b77dad76d5d7c814318127f2be93b489d7866808c57ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:15Z is after 
2025-08-24T17:21:41Z" Feb 16 11:07:15 crc kubenswrapper[4949]: I0216 11:07:15.958826 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:15Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:15 crc kubenswrapper[4949]: I0216 11:07:15.969144 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kxn9z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc3f701c-2094-4818-871c-547fc5636a55\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://993e593b3f31361b71cff339c39e1bc66b6fbbeb8a1d125976254ec852dcd919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rld7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kxn9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:15Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:15 crc kubenswrapper[4949]: I0216 11:07:15.980678 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jsmls" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e42a398-f83a-4463-9ab7-4e093e80d744\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3e0579c66283751490a28d6f93146aa4b7a000219c17747fe2041a778fe2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7q7gk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jsmls\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:15Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:15 crc kubenswrapper[4949]: I0216 11:07:15.994461 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aada2690-8d5f-4854-bc83-59906010e8ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-16T11:07:11Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0216 11:07:05.819935 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0216 11:07:05.820709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2063297113/tls.crt::/tmp/serving-cert-2063297113/tls.key\\\\\\\"\\\\nI0216 11:07:11.242838 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0216 11:07:11.246739 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0216 11:07:11.246759 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0216 11:07:11.246780 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0216 11:07:11.246785 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0216 11:07:11.257100 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0216 11:07:11.257140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0216 11:07:11.257148 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0216 11:07:11.257148 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0216 11:07:11.257158 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0216 11:07:11.257195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0216 11:07:11.257203 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0216 11:07:11.257208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0216 11:07:11.259242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:15Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:15 crc kubenswrapper[4949]: I0216 11:07:15.999399 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-kvrsd"] Feb 16 11:07:15 crc kubenswrapper[4949]: I0216 11:07:15.999717 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-kvrsd" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.002091 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.002449 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.002632 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.003328 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.008079 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eff651eda112458195ec8e6bc6ecdf362f44a44f7f8be136f631153251f278c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:16Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.029368 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gfr2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:16Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.041555 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://317e85af8628e3ae9d104c6faacfd4ce6b66adc7d3f0e426433f60118fffa894\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://43a9246b1c43b2bfb063c1d8aded507f620472bb98ffc99e91115e1189478807\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webho
ok\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:16Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.052628 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:16Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.066073 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:16Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.070460 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/4e1092c7-896f-4334-b157-ac007cdb0d79-host\") pod \"node-ca-kvrsd\" (UID: \"4e1092c7-896f-4334-b157-ac007cdb0d79\") " pod="openshift-image-registry/node-ca-kvrsd" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.070496 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/4e1092c7-896f-4334-b157-ac007cdb0d79-serviceca\") pod \"node-ca-kvrsd\" (UID: \"4e1092c7-896f-4334-b157-ac007cdb0d79\") " pod="openshift-image-registry/node-ca-kvrsd" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.070528 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8v7hm\" (UniqueName: \"kubernetes.io/projected/4e1092c7-896f-4334-b157-ac007cdb0d79-kube-api-access-8v7hm\") pod \"node-ca-kvrsd\" (UID: \"4e1092c7-896f-4334-b157-ac007cdb0d79\") " pod="openshift-image-registry/node-ca-kvrsd" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.075502 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"39ca5ab7-457c-4404-a3eb-f6acce74843b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d3e706e8ad75d6c7419dc41073b0842faf175f1e153735a12e3ee7243676253\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4978ec698681f01249736b54aa5a1fba37fc94f3667b735c99bdfb97d3bd3380\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-26lss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:16Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.086078 4949 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://317e85af8628e3ae9d104c6faacfd4ce6b66adc7d3f0e426433f60118fffa894\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://43a9246b1c43b2bfb063c1d8aded507f620472bb98ffc99e91115e1189478807\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:16Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.095848 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eff651eda112458195ec8e6bc6ecdf362f44a44f7f8be136f631153251f278c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:16Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.114353 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gfr2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:16Z 
is after 2025-08-24T17:21:41Z" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.127500 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:16Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.141511 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:16Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.151653 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"39ca5ab7-457c-4404-a3eb-f6acce74843b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d3e706e8ad75d6c7419dc41073b0842faf175f1e153735a12e3ee7243676253\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":
\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4978ec698681f01249736b54aa5a1fba37fc94f3667b735c99bdfb97d3bd3380\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-26lss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:16Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.167102 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ff1c788-4b66-48e9-8178-006f231d264c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vjrxd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:16Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:16 crc 
kubenswrapper[4949]: I0216 11:07:16.171542 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/4e1092c7-896f-4334-b157-ac007cdb0d79-host\") pod \"node-ca-kvrsd\" (UID: \"4e1092c7-896f-4334-b157-ac007cdb0d79\") " pod="openshift-image-registry/node-ca-kvrsd" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.171579 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/4e1092c7-896f-4334-b157-ac007cdb0d79-serviceca\") pod \"node-ca-kvrsd\" (UID: \"4e1092c7-896f-4334-b157-ac007cdb0d79\") " pod="openshift-image-registry/node-ca-kvrsd" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.171624 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8v7hm\" (UniqueName: \"kubernetes.io/projected/4e1092c7-896f-4334-b157-ac007cdb0d79-kube-api-access-8v7hm\") pod \"node-ca-kvrsd\" (UID: \"4e1092c7-896f-4334-b157-ac007cdb0d79\") " pod="openshift-image-registry/node-ca-kvrsd" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.171677 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/4e1092c7-896f-4334-b157-ac007cdb0d79-host\") pod \"node-ca-kvrsd\" (UID: \"4e1092c7-896f-4334-b157-ac007cdb0d79\") " pod="openshift-image-registry/node-ca-kvrsd" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.172907 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/4e1092c7-896f-4334-b157-ac007cdb0d79-serviceca\") pod \"node-ca-kvrsd\" (UID: \"4e1092c7-896f-4334-b157-ac007cdb0d79\") " pod="openshift-image-registry/node-ca-kvrsd" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.179884 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kvrsd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e1092c7-896f-4334-b157-ac007cdb0d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8v7hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:15Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kvrsd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:16Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.200463 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aada2690-8d5f-4854-bc83-59906010e8ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-16T11:07:11Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0216 11:07:05.819935 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0216 11:07:05.820709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2063297113/tls.crt::/tmp/serving-cert-2063297113/tls.key\\\\\\\"\\\\nI0216 11:07:11.242838 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0216 11:07:11.246739 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0216 11:07:11.246759 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0216 11:07:11.246780 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0216 11:07:11.246785 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0216 11:07:11.257100 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0216 11:07:11.257140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0216 11:07:11.257148 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0216 11:07:11.257148 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0216 11:07:11.257158 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0216 11:07:11.257195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0216 11:07:11.257203 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0216 11:07:11.257208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0216 11:07:11.259242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:16Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.201921 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-10 10:38:23.055597362 +0000 UTC Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.202513 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8v7hm\" (UniqueName: \"kubernetes.io/projected/4e1092c7-896f-4334-b157-ac007cdb0d79-kube-api-access-8v7hm\") pod \"node-ca-kvrsd\" (UID: \"4e1092c7-896f-4334-b157-ac007cdb0d79\") " pod="openshift-image-registry/node-ca-kvrsd" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.212908 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1997fe9cefd6bfcd634b77dad76d5d7c814318127f2be93b489d7866808c57ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:16Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.226818 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:16Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.235002 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:07:16 crc kubenswrapper[4949]: E0216 11:07:16.235118 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.235020 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:07:16 crc kubenswrapper[4949]: E0216 11:07:16.235194 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.235002 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:07:16 crc kubenswrapper[4949]: E0216 11:07:16.235242 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.238145 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kxn9z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc3f701c-2094-4818-871c-547fc5636a55\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://993e593b3f31361b71cff339c39e1bc66b6fbbeb8a1d125976254ec852dcd919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rld7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kxn9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:16Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.249119 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jsmls" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e42a398-f83a-4463-9ab7-4e093e80d744\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3e0579c66283751490a28d6f93146aa4b7a000219c17747fe2041a778fe2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7q7gk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jsmls\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:16Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.300009 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.304261 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.307842 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.312503 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:16Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.321540 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"39ca5ab7-457c-4404-a3eb-f6acce74843b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d3e706e8ad75d6c7419dc41073b0842faf175f1e153735a12e3ee7243676253\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4978ec698681f01249736b54aa5a1fba37fc94f3667b735c99bdfb97d3bd3380\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-26lss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:16Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.331941 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:16Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.340584 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kvrsd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e1092c7-896f-4334-b157-ac007cdb0d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8v7hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:15Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kvrsd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired 
or is not yet valid: current time 2026-02-16T11:07:16Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.354925 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ff1c788-4b66-48e9-8178-006f231d264c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disa
bled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"nam
e\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vjrxd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:16Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.369074 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aada2690-8d5f-4854-bc83-59906010e8ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-16T11:07:11Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0216 11:07:05.819935 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0216 11:07:05.820709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2063297113/tls.crt::/tmp/serving-cert-2063297113/tls.key\\\\\\\"\\\\nI0216 11:07:11.242838 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0216 11:07:11.246739 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0216 11:07:11.246759 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0216 11:07:11.246780 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0216 11:07:11.246785 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0216 11:07:11.257100 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0216 11:07:11.257140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0216 11:07:11.257148 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0216 11:07:11.257148 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0216 11:07:11.257158 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0216 11:07:11.257195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0216 11:07:11.257203 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0216 11:07:11.257208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0216 11:07:11.259242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:16Z is after 2025-08-24T17:21:41Z"
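The embedded patch bodies in these records are hard to read because they are quoted twice: the status manager quotes the patch JSON inside its error string, and klog then quotes the whole err value, so every quote inside the JSON surfaces as \\\". A short sketch that undoes both layers; the fragment below is abbreviated from the kube-apiserver record above, and the marker-based extraction of the patch literal is a simplification:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"strconv"
	"strings"
)

func main() {
	// Abbreviated err="..." value copied from a record above (real ones are far longer).
	rendered := `"failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aada2690-8d5f-4854-bc83-59906010e8ec\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": ..."`

	// Round 1: undo the quoting applied when the whole err value was rendered.
	msg, err := strconv.Unquote(rendered)
	if err != nil {
		log.Fatalf("unquote err value: %v", err)
	}

	// The patch sits, still quoted once more, between these two markers.
	start := strings.Index(msg, `"{`)
	end := strings.LastIndex(msg, `}"`) + 2
	patchLiteral := msg[start:end]

	// Round 2: undo the inner quoting around the patch itself.
	patch, err := strconv.Unquote(patchLiteral)
	if err != nil {
		log.Fatalf("unquote patch: %v", err)
	}

	var doc map[string]any
	if err := json.Unmarshal([]byte(patch), &doc); err != nil {
		log.Fatalf("parse patch: %v", err)
	}
	fmt.Printf("patch metadata: %v\n", doc["metadata"])
}
```

Run against a full record this recovers the strategic-merge patch, including the $setElementOrder/conditions directive that fixes the ordering of the conditions list the patch's sparse entries merge into.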
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1997fe9cefd6bfcd634b77dad76d5d7c814318127f2be93b489d7866808c57ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:16Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.398778 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:16Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.409410 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kxn9z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc3f701c-2094-4818-871c-547fc5636a55\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://993e593b3f31361b71cff339c39e1bc66b6fbbeb8a1d125976254ec852dcd919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rld7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kxn9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:16Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.425282 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jsmls" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e42a398-f83a-4463-9ab7-4e093e80d744\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3e0579c66283751490a28d6f93146aa4b7a000219c17747fe2041a778fe2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7q7gk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jsmls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:16Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.443829 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://317e85af8628e3ae9d104c6faacfd4ce6b66adc7d3f0e426433f60118fffa894\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://43a9246b1c43b2bfb063c1d8aded507f620472bb98ffc99e91115e1189478807\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:16Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.461925 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eff651eda112458195ec8e6bc6ecdf362f44a44f7f8be136f631153251f278c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:16Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.482630 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gfr2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:16Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.484891 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-kvrsd" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.495756 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://317e85af8628e3ae9d104c6faacfd4ce6b66adc7d3f0e426433f60118fffa894\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://43a9246b1c43b2bfb063c1d8aded507f620472bb98ffc99e91115e1189478807\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\
\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:16Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.514810 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eff651eda112458195ec8e6bc6ecdf362f44a44f7f8be136f631153251f278c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:16Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:16 crc kubenswrapper[4949]: W0216 11:07:16.516125 4949 
manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4e1092c7_896f_4334_b157_ac007cdb0d79.slice/crio-0bb1e127c62046386e120c7290a9a0dde1073829df6e9f6d492e82f7cf68d23e WatchSource:0}: Error finding container 0bb1e127c62046386e120c7290a9a0dde1073829df6e9f6d492e82f7cf68d23e: Status 404 returned error can't find the container with id 0bb1e127c62046386e120c7290a9a0dde1073829df6e9f6d492e82f7cf68d23e Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.561365 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gfr2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:16Z 
is after 2025-08-24T17:21:41Z" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.592300 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:16Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.635930 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:16Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.687578 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"39ca5ab7-457c-4404-a3eb-f6acce74843b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d3e706e8ad75d6c7419dc41073b0842faf175f1e153735a12e3ee7243676253\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":
\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4978ec698681f01249736b54aa5a1fba37fc94f3667b735c99bdfb97d3bd3380\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-26lss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:16Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.730189 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ff1c788-4b66-48e9-8178-006f231d264c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vjrxd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:16Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:16 crc 
kubenswrapper[4949]: I0216 11:07:16.750973 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kvrsd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e1092c7-896f-4334-b157-ac007cdb0d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8v7hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:15Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kvrsd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:16Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.763893 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-kvrsd" event={"ID":"4e1092c7-896f-4334-b157-ac007cdb0d79","Type":"ContainerStarted","Data":"0bb1e127c62046386e120c7290a9a0dde1073829df6e9f6d492e82f7cf68d23e"} Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.766335 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" event={"ID":"6ff1c788-4b66-48e9-8178-006f231d264c","Type":"ContainerStarted","Data":"20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243"} Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.796306 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jsmls" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e42a398-f83a-4463-9ab7-4e093e80d744\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3e0579c66283751490a28d6f93146aa4b7a000219c17747fe2041a778fe2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7q7gk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jsmls\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:16Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.837911 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aada2690-8d5f-4854-bc83-59906010e8ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-16T11:07:11Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0216 11:07:05.819935 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0216 11:07:05.820709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2063297113/tls.crt::/tmp/serving-cert-2063297113/tls.key\\\\\\\"\\\\nI0216 11:07:11.242838 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0216 11:07:11.246739 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0216 11:07:11.246759 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0216 11:07:11.246780 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0216 11:07:11.246785 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0216 11:07:11.257100 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0216 11:07:11.257140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0216 11:07:11.257148 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0216 11:07:11.257148 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0216 11:07:11.257158 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0216 11:07:11.257195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0216 11:07:11.257203 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0216 11:07:11.257208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0216 11:07:11.259242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:16Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.876674 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"abcc0a5c-3dfa-48fe-9df1-4f941d9d811c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afad26039cce493d107df9286cf3268dfc5f76d20b86bb34a36ef7742b8419bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68519f3c9bfc45ad28f92e2cf0c28a9413821784aafd91ab65a311259ed6ecf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9f247fc563e96b2d5c1e866afe8eef00f61520018ad001e6b02cffac286d3ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2296e3c50624ffcfe10eb50ae71f715af1868bfa777c60068d3ef55a2544af00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:16Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.916704 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1997fe9cefd6bfcd634b77dad76d5d7c814318127f2be93b489d7866808c57ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:16Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.952972 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:16Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:16 crc kubenswrapper[4949]: I0216 11:07:16.993014 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kxn9z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc3f701c-2094-4818-871c-547fc5636a55\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://993e593b3f31361b71cff339c39e1bc66b6fbbeb8a1d125976254ec852dcd919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rld7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kxn9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:16Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.031978 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kvrsd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e1092c7-896f-4334-b157-ac007cdb0d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8v7hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:15Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kvrsd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:17Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.074857 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ff1c788-4b66-48e9-8178-006f231d264c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64
b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vjrxd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:17Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.124449 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aada2690-8d5f-4854-bc83-59906010e8ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha2
56:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-16T11:07:11Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0216 11:07:05.819935 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0216 11:07:05.820709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2063297113/tls.crt::/tmp/serving-cert-2063297113/tls.key\\\\\\\"\\\\nI0216 11:07:11.242838 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0216 11:07:11.246739 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0216 11:07:11.246759 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0216 11:07:11.246780 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0216 11:07:11.246785 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0216 11:07:11.257100 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0216 11:07:11.257140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0216 11:07:11.257148 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0216 11:07:11.257148 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0216 11:07:11.257158 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0216 11:07:11.257195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0216 11:07:11.257203 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0216 11:07:11.257208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0216 11:07:11.259242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting 
failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:17Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.157055 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"abcc0a5c-3dfa-48fe-9df1-4f941d9d811c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afad26039cce493d107df9286cf3268dfc5f76d20b86bb34a36ef7742b8419bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68519f3c9bfc45ad28f92e2cf0c28a9413821784aafd91ab65a311259ed6ecf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9f247fc563e96b2d5c1e866afe8eef00f61520018ad001e6b02cffac286d3ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2296e3c50624ffcfe10eb50ae71f715af1868bfa777c60068d3ef55a2544af00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:17Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.195143 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1997fe9cefd6bfcd634b77dad76d5d7c814318127f2be93b489d7866808c57ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:17Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.202464 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-27 07:48:24.147329997 +0000 UTC Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.233385 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:17Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.271717 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kxn9z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc3f701c-2094-4818-871c-547fc5636a55\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://993e593b3f31361b71cff339c39e1bc66b6fbbeb8a1d125976254ec852dcd919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rld7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kxn9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:17Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.312041 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jsmls" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e42a398-f83a-4463-9ab7-4e093e80d744\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3e0579c66283751490a28d6f93146aa4b7a000219c17747fe2041a778fe2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7q7gk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jsmls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:17Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.354166 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://317e85af8628e3ae9d104c6faacfd4ce6b66adc7d3f0e426433f60118fffa894\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://43a9246b1c43b2bfb063c1d8aded507f620472bb98ffc99e91115e1189478807\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:17Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.391339 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eff651eda112458195ec8e6bc6ecdf362f44a44f7f8be136f631153251f278c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:17Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.408828 4949 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.410792 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.410826 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.410838 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.410958 4949 kubelet_node_status.go:76] "Attempting to register node" node="crc" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 
11:07:17.437146 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"vol
umeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/ope
nshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@
sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gfr2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:17Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.484988 4949 kubelet_node_status.go:115] "Node was previously registered" node="crc" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.485302 4949 kubelet_node_status.go:79] "Successfully registered node" node="crc" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.486433 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.486462 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.486471 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.486487 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.486500 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:17Z","lastTransitionTime":"2026-02-16T11:07:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.512515 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:17Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:17 crc kubenswrapper[4949]: E0216 11:07:17.513568 4949 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"message\\\":\\\"kubelet has no disk 
pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeByt
es\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-a
rt-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc77a723-71f9-4f4a-b80e-2feb50c63f04\\\",\\\"systemUUID\\\":\\\"f
cf7eef6-e236-4c8e-bd9c-41b70a7621ed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:17Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.516346 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.516381 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.516393 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.516409 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.516421 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:17Z","lastTransitionTime":"2026-02-16T11:07:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:17 crc kubenswrapper[4949]: E0216 11:07:17.527003 4949 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc77a723-71f9-4f4a-b80e-2feb50c63f04\\\",\\\"systemUUID\\\":\\\"fcf7eef6-e236-4c8e-bd9c-41b70a7621ed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:17Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.531152 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.531205 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.531217 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.531233 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.531243 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:17Z","lastTransitionTime":"2026-02-16T11:07:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:17 crc kubenswrapper[4949]: E0216 11:07:17.541744 4949 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc77a723-71f9-4f4a-b80e-2feb50c63f04\\\",\\\"systemUUID\\\":\\\"fcf7eef6-e236-4c8e-bd9c-41b70a7621ed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:17Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.545576 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.545619 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.545630 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.545647 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.545658 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:17Z","lastTransitionTime":"2026-02-16T11:07:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.554008 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"39ca5ab7-457c-4404-a3eb-f6acce74843b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d3e706e8ad75d6c7419dc41073b0842faf175f1e153735a12e3ee7243676253\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4978ec698681f01249736b54aa5a1fba37fc94f3667b735c99bdfb97d3bd3380\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running
\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-26lss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:17Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:17 crc kubenswrapper[4949]: E0216 11:07:17.557087 4949 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc77a723-71f9-4f4a-b80e-2feb50c63f04\\\",\\\"systemUUID\\\":\\\"fcf7eef6-e236-4c8e-bd9c-41b70a7621ed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:17Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.559934 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.559961 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.559971 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.559986 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.559994 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:17Z","lastTransitionTime":"2026-02-16T11:07:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:17 crc kubenswrapper[4949]: E0216 11:07:17.572295 4949 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc77a723-71f9-4f4a-b80e-2feb50c63f04\\\",\\\"systemUUID\\\":\\\"fcf7eef6-e236-4c8e-bd9c-41b70a7621ed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:17Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:17 crc kubenswrapper[4949]: E0216 11:07:17.572410 4949 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.574166 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.574212 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.574223 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.574239 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.574247 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:17Z","lastTransitionTime":"2026-02-16T11:07:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.592825 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:17Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.676396 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.676429 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.676437 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.676450 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.676460 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:17Z","lastTransitionTime":"2026-02-16T11:07:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.771334 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-kvrsd" event={"ID":"4e1092c7-896f-4334-b157-ac007cdb0d79","Type":"ContainerStarted","Data":"5a5d6a2719dc30352250a07c1028c552b8f73c26d421b869f2e8ddc74fb9dcd3"} Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.774476 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" event={"ID":"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d","Type":"ContainerStarted","Data":"47947db986c5433367c6b52570a017def2e4babf72f785111587ca86fd745795"} Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.776938 4949 generic.go:334] "Generic (PLEG): container finished" podID="6ff1c788-4b66-48e9-8178-006f231d264c" containerID="20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243" exitCode=0 Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.776967 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" event={"ID":"6ff1c788-4b66-48e9-8178-006f231d264c","Type":"ContainerDied","Data":"20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243"} Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.778020 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.778045 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.778053 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.778067 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.778078 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:17Z","lastTransitionTime":"2026-02-16T11:07:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.786627 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://317e85af8628e3ae9d104c6faacfd4ce6b66adc7d3f0e426433f60118fffa894\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://43a9246b1c43b2bfb063c1d8aded507f620472bb98ffc99e91115e1189478807\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:17Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.805053 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eff651eda112458195ec8e6bc6ecdf362f44a44f7f8be136f631153251f278c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:17Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.828420 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gfr2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:17Z 
is after 2025-08-24T17:21:41Z" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.844151 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"39ca5ab7-457c-4404-a3eb-f6acce74843b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d3e706e8ad75d6c7419dc41073b0842faf175f1e153735a12e3ee7243676253\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4978ec698681f01249736b54aa5a1fba37fc94f3667b735c99bdfb97d3bd3380\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-26lss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": 
tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:17Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.860854 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:17Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.874826 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:17Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.881025 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.881059 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.881069 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.881082 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.881091 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:17Z","lastTransitionTime":"2026-02-16T11:07:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.891334 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ff1c788-4b66-48e9-8178-006f231d264c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets
/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entry
point\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vjrxd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:17Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.910670 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kvrsd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e1092c7-896f-4334-b157-ac007cdb0d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a5d6a2719dc30352250a07c1028c552b8f73c26d421b869f2e8ddc74fb9dcd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8v7hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:15Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kvrsd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:17Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.953240 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"abcc0a5c-3dfa-48fe-9df1-4f941d9d811c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afad26039cce493d107df9286cf3268dfc5f76d20b86bb34a36ef7742b8419bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68519f3c9bfc45ad28f92e2cf0c28a9413821784aafd91ab65a311259ed6ecf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9f247fc563e96b2d5c1e866afe8eef00f61520018ad001e6b02cffac286d3ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2296e3c50624ffcfe10eb50ae71f715af1868bfa777c60068d3ef55a2544af00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:17Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.983079 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.983269 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.983334 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.983396 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.983459 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:17Z","lastTransitionTime":"2026-02-16T11:07:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:17 crc kubenswrapper[4949]: I0216 11:07:17.992430 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1997fe9cefd6bfcd634b77dad76d5d7c814318127f2be93b489d7866808c57ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:17Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.034141 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:18Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.071245 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kxn9z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc3f701c-2094-4818-871c-547fc5636a55\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://993e593b3f31361b71cff339c39e1bc66b6fbbeb8a1d125976254ec852dcd919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rld7r\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kxn9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:18Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.085494 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.085533 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.085543 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.085557 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.085567 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:18Z","lastTransitionTime":"2026-02-16T11:07:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.117585 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jsmls" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e42a398-f83a-4463-9ab7-4e093e80d744\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3e0579c66283751490a28d6f93146aa4b7a000219c17747fe2041a778fe2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7q7gk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jsmls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:18Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.152925 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aada2690-8d5f-4854-bc83-59906010e8ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6
e740d5b0410dbcd2fc809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-16T11:07:11Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0216 11:07:05.819935 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0216 11:07:05.820709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2063297113/tls.crt::/tmp/serving-cert-2063297113/tls.key\\\\\\\"\\\\nI0216 11:07:11.242838 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0216 11:07:11.246739 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0216 11:07:11.246759 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0216 11:07:11.246780 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0216 11:07:11.246785 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0216 11:07:11.257100 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0216 11:07:11.257140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0216 11:07:11.257148 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0216 11:07:11.257148 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0216 11:07:11.257158 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0216 11:07:11.257195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0216 11:07:11.257203 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0216 11:07:11.257208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0216 11:07:11.259242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:18Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.187739 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.187774 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.187783 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.187796 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 
16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.187805 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:18Z","lastTransitionTime":"2026-02-16T11:07:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.193774 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://317e85af8628e3ae9d104c6faacfd4ce6b66adc7d3f0e426433f60118fffa894\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://43a9246b1c43b2bfb063c1d8aded507f620472bb98ffc99e91115e1189478807\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:18Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.203203 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-09 15:15:15.943932115 +0000 UTC Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.231230 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eff651eda112458195ec8e6bc6ecdf362f44a44f7f8be136f631153251f278c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:18Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.234976 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:07:18 crc kubenswrapper[4949]: E0216 11:07:18.235121 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.235529 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:07:18 crc kubenswrapper[4949]: E0216 11:07:18.235613 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.235683 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:07:18 crc kubenswrapper[4949]: E0216 11:07:18.235769 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.295059 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.295403 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.295416 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.295433 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.295445 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:18Z","lastTransitionTime":"2026-02-16T11:07:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.296676 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93
892bb5b98b153a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gfr2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:18Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.327455 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"39ca5ab7-457c-4404-a3eb-f6acce74843b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d3e706e8ad75d6c7419dc41073b0842faf175f1e153735a12e3ee7243676253\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\
\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4978ec698681f01249736b54aa5a1fba37fc94f3667b735c99bdfb97d3bd3380\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-26lss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:18Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.352841 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:18Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.394317 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:18Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.398075 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.398109 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.398120 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.398139 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.398150 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:18Z","lastTransitionTime":"2026-02-16T11:07:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.434680 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ff1c788-4b66-48e9-8178-006f231d264c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\
\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vjrxd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:18Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.472669 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kvrsd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e1092c7-896f-4334-b157-ac007cdb0d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a5d6a2719dc30352250a07c1028c552b8f73c26d421b869f2e8ddc74fb9dcd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8v7hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:15Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kvrsd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:18Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.500090 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.500128 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.500137 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.500152 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.500162 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:18Z","lastTransitionTime":"2026-02-16T11:07:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.515556 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"abcc0a5c-3dfa-48fe-9df1-4f941d9d811c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afad26039cce493d107df9286cf3268dfc5f76d20b86bb34a36ef7742b8419bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68519f3c9bfc45ad28f92e2cf0c28a9413821784aafd91ab65a311259ed6ecf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9f247fc563e96b2d5c1e866afe8eef00f61520018ad001e6b02cffac286d3ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52
Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2296e3c50624ffcfe10eb50ae71f715af1868bfa777c60068d3ef55a2544af00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:18Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.553311 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1997fe9cefd6bfcd634b77dad76d5d7c814318127f2be93b489d7866808c57ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:18Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.595143 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:18Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.607103 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.607152 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.607163 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.607199 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.607213 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:18Z","lastTransitionTime":"2026-02-16T11:07:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.630968 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kxn9z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc3f701c-2094-4818-871c-547fc5636a55\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://993e593b3f31361b71cff339c39e1bc66b6fbbeb8a1d125976254ec852dcd919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rld7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kxn9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:18Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.672311 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jsmls" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e42a398-f83a-4463-9ab7-4e093e80d744\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3e0579c66283751490a28d6f93146aa4b7a000219c17747fe2041a778fe2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7q7gk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jsmls\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:18Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.710101 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.710136 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.710149 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.710181 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.710191 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:18Z","lastTransitionTime":"2026-02-16T11:07:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.715923 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aada2690-8d5f-4854-bc83-59906010e8ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-16T11:07:11Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0216 11:07:05.819935 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0216 11:07:05.820709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2063297113/tls.crt::/tmp/serving-cert-2063297113/tls.key\\\\\\\"\\\\nI0216 11:07:11.242838 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0216 11:07:11.246739 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0216 11:07:11.246759 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0216 11:07:11.246780 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0216 11:07:11.246785 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0216 11:07:11.257100 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0216 11:07:11.257140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0216 11:07:11.257148 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0216 11:07:11.257148 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0216 11:07:11.257158 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0216 11:07:11.257195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0216 11:07:11.257203 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0216 11:07:11.257208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0216 11:07:11.259242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:18Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.784158 4949 generic.go:334] "Generic (PLEG): container finished" podID="6ff1c788-4b66-48e9-8178-006f231d264c" containerID="516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6" exitCode=0 Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.784210 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" event={"ID":"6ff1c788-4b66-48e9-8178-006f231d264c","Type":"ContainerDied","Data":"516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6"} Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.806022 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:18Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.819796 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.819827 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.819837 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.819853 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.819864 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:18Z","lastTransitionTime":"2026-02-16T11:07:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.820068 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:18Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.830831 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"39ca5ab7-457c-4404-a3eb-f6acce74843b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d3e706e8ad75d6c7419dc41073b0842faf175f1e153735a12e3ee7243676253\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4978ec698681f01249736b54aa5a1fba37fc94f3667b735c99bdfb97d3bd3380\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-26lss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:18Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.874393 4949 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ff1c788-4b66-48e9-8178-006f231d264c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\
\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\
\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vjrxd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:18Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.912069 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kvrsd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e1092c7-896f-4334-b157-ac007cdb0d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a5d6a2719dc30352250a07c1028c552b8f73c26d421b869f2e8ddc74fb9dcd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8v7hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:15Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kvrsd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:18Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.923372 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.923437 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.923473 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.923580 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.923596 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:18Z","lastTransitionTime":"2026-02-16T11:07:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.954351 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jsmls" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e42a398-f83a-4463-9ab7-4e093e80d744\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3e0579c66283751490a28d6f93146aa4b7a000219c17747fe2041a778fe2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7q7gk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"host
IP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jsmls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:18Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:18 crc kubenswrapper[4949]: I0216 11:07:18.998574 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aada2690-8d5f-4854-bc83-59906010e8ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubern
etes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-16T11:07:11Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0216 11:07:05.819935 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0216 11:07:05.820709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2063297113/tls.crt::/tmp/serving-cert-2063297113/tls.key\\\\\\\"\\\\nI0216 11:07:11.242838 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0216 11:07:11.246739 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0216 11:07:11.246759 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0216 11:07:11.246780 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0216 11:07:11.246785 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0216 11:07:11.257100 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0216 11:07:11.257140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0216 11:07:11.257148 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0216 11:07:11.257148 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0216 11:07:11.257158 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0216 11:07:11.257195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0216 11:07:11.257203 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0216 11:07:11.257208 1 secure_serving.go:69] Use of insecure cipher 
'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0216 11:07:11.259242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:18Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.026267 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.026317 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.026328 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:19 crc kubenswrapper[4949]: 
I0216 11:07:19.026349 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.026362 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:19Z","lastTransitionTime":"2026-02-16T11:07:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.034154 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"abcc0a5c-3dfa-48fe-9df1-4f941d9d811c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afad26039cce493d107df9286cf3268dfc5f76d20b86bb34a36ef7742b8419bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68519f3c9bfc45ad28f92e2cf0c28a9413821784aafd91ab65a311259ed6ecf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9f247fc563e96b2d5c1e866afe8eef00f61520018ad001e6b02cffac286d3ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:850
6ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2296e3c50624ffcfe10eb50ae71f715af1868bfa777c60068d3ef55a2544af00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:19Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.076278 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1997fe9cefd6bfcd634b77dad76d5d7c814318127f2be93b489d7866808c57ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:19Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.115492 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:19Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.128392 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.128448 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.128456 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.128473 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.128491 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:19Z","lastTransitionTime":"2026-02-16T11:07:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.152163 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kxn9z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc3f701c-2094-4818-871c-547fc5636a55\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://993e593b3f31361b71cff339c39e1bc66b6fbbeb8a1d125976254ec852dcd919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rld7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kxn9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:19Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.193604 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://317e85af8628e3ae9d104c6faacfd4ce6b66adc7d3f0e426433f60118fffa894\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://43a9246b1c43b2bfb063c1d8aded507f620472bb98ffc99e91115e1189478807\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:19Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.204049 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-06 01:16:33.062672807 +0000 UTC Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.231428 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 
11:07:19.231471 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.231486 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.231506 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.231520 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:19Z","lastTransitionTime":"2026-02-16T11:07:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.234688 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eff651eda112458195ec8e6bc6ecdf362f44a44f7f8be136f631153251f278c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:19Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.281281 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gfr2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:19Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.333698 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.333759 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.333781 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.333993 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.334007 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:19Z","lastTransitionTime":"2026-02-16T11:07:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.436070 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.436118 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.436130 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.436149 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.436160 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:19Z","lastTransitionTime":"2026-02-16T11:07:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.539197 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.539246 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.539255 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.539270 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.539280 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:19Z","lastTransitionTime":"2026-02-16T11:07:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.642241 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.642272 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.642280 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.642293 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.642301 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:19Z","lastTransitionTime":"2026-02-16T11:07:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.744345 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.744385 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.744393 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.744408 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.744418 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:19Z","lastTransitionTime":"2026-02-16T11:07:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.791057 4949 generic.go:334] "Generic (PLEG): container finished" podID="6ff1c788-4b66-48e9-8178-006f231d264c" containerID="bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414" exitCode=0 Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.791109 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" event={"ID":"6ff1c788-4b66-48e9-8178-006f231d264c","Type":"ContainerDied","Data":"bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414"} Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.805667 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kxn9z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc3f701c-2094-4818-871c-547fc5636a55\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://993e593b3f31361b71cff339c39e1bc66b6fbbeb8a1d125976254ec852dcd919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rld7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kxn9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:19Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.826248 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jsmls" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e42a398-f83a-4463-9ab7-4e093e80d744\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3e0579c66283751490a28d6f93146aa4b7a000219c17747fe2041a778fe2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7q7gk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jsmls\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:19Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.847526 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aada2690-8d5f-4854-bc83-59906010e8ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-16T11:07:11Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0216 11:07:05.819935 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0216 11:07:05.820709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2063297113/tls.crt::/tmp/serving-cert-2063297113/tls.key\\\\\\\"\\\\nI0216 11:07:11.242838 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0216 11:07:11.246739 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0216 11:07:11.246759 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0216 11:07:11.246780 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0216 11:07:11.246785 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0216 11:07:11.257100 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0216 11:07:11.257140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0216 11:07:11.257148 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0216 11:07:11.257148 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0216 11:07:11.257158 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0216 11:07:11.257195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0216 11:07:11.257203 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0216 11:07:11.257208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0216 11:07:11.259242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:19Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.851066 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.851108 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.851120 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.851140 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 
16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.851152 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:19Z","lastTransitionTime":"2026-02-16T11:07:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.864011 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"abcc0a5c-3dfa-48fe-9df1-4f941d9d811c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afad26039cce493d107df9286cf3268dfc5f76d20b86bb34a36ef7742b8419bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68519f3c9bfc45ad28f92e2cf0c28a9413821784aafd91ab65a311259ed6ecf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9f247fc563e96b2d5c1e866afe8eef00f61520018ad001e6b02cffac286d3ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kub
e-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2296e3c50624ffcfe10eb50ae71f715af1868bfa777c60068d3ef55a2544af00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:19Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.878558 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1997fe9cefd6bfcd634b77dad76d5d7c814318127f2be93b489d7866808c57ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:19Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.893065 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:19Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.911112 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.911240 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.911268 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.911293 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.911316 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:07:19 crc kubenswrapper[4949]: E0216 11:07:19.911697 4949 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 16 11:07:19 crc kubenswrapper[4949]: E0216 11:07:19.911716 4949 secret.go:188] 
Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 16 11:07:19 crc kubenswrapper[4949]: E0216 11:07:19.911731 4949 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 16 11:07:19 crc kubenswrapper[4949]: E0216 11:07:19.911748 4949 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 16 11:07:19 crc kubenswrapper[4949]: E0216 11:07:19.911757 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:07:27.911743232 +0000 UTC m=+37.541077397 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:07:19 crc kubenswrapper[4949]: E0216 11:07:19.911772 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-16 11:07:27.911766853 +0000 UTC m=+37.541101018 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 16 11:07:19 crc kubenswrapper[4949]: E0216 11:07:19.911806 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-02-16 11:07:27.911784443 +0000 UTC m=+37.541118688 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.911792 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://317e85af8628e3ae9d104c6faacfd4ce6b66adc7d3f0e426433f60118fffa894\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://43a9246b1c43b2bfb063c1d8aded507f620472bb98ffc99e91115e1189478807\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:19Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:19 crc kubenswrapper[4949]: E0216 11:07:19.911901 4949 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Feb 16 11:07:19 crc kubenswrapper[4949]: E0216 11:07:19.911913 4949 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 16 11:07:19 crc kubenswrapper[4949]: E0216 11:07:19.911928 4949 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 16 11:07:19 crc kubenswrapper[4949]: E0216 11:07:19.911941 4949 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 16 11:07:19 crc kubenswrapper[4949]: E0216 11:07:19.911977 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-16 11:07:27.911962917 +0000 UTC m=+37.541297172 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Feb 16 11:07:19 crc kubenswrapper[4949]: E0216 11:07:19.912000 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-02-16 11:07:27.911990288 +0000 UTC m=+37.541324573 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.925078 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eff651eda112458195ec8e6bc6ecdf362f44a44f7f8be136f631153251f278c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:19Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.943030 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gfr2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:19Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.953243 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.953279 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.953287 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.953302 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.953312 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:19Z","lastTransitionTime":"2026-02-16T11:07:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.956591 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:19Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.962263 4949 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.963217 4949 scope.go:117] "RemoveContainer" containerID="7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546" Feb 16 11:07:19 crc kubenswrapper[4949]: E0216 11:07:19.963446 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.973500 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:19Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:19 crc kubenswrapper[4949]: I0216 11:07:19.985725 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"39ca5ab7-457c-4404-a3eb-f6acce74843b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d3e706e8ad75d6c7419dc41073b0842faf175f1e153735a12e3ee7243676253\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4978ec698681f01249736b54aa5a1fba37fc94f3667b735c99bdfb97d3bd3380\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-26lss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:19Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.000489 4949 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ff1c788-4b66-48e9-8178-006f231d264c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20d26c1
4a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/
entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vjrxd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:19Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.011385 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kvrsd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e1092c7-896f-4334-b157-ac007cdb0d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a5d6a2719dc30352250a07c1028c552b8f73c26d421b869f2e8ddc74fb9dcd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8v7hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:15Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kvrsd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:20Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.055725 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.055781 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.055790 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.055804 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.055815 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:20Z","lastTransitionTime":"2026-02-16T11:07:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.157984 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.158032 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.158044 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.158065 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.158079 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:20Z","lastTransitionTime":"2026-02-16T11:07:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.204522 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-26 07:51:31.17076079 +0000 UTC Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.235284 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.235313 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.235377 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:07:20 crc kubenswrapper[4949]: E0216 11:07:20.235540 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:07:20 crc kubenswrapper[4949]: E0216 11:07:20.235995 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:07:20 crc kubenswrapper[4949]: E0216 11:07:20.236064 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.260567 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.260610 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.260622 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.260636 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.260648 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:20Z","lastTransitionTime":"2026-02-16T11:07:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.363275 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.363326 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.363339 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.363359 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.363370 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:20Z","lastTransitionTime":"2026-02-16T11:07:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.467488 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.467546 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.467576 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.467597 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.467610 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:20Z","lastTransitionTime":"2026-02-16T11:07:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.570395 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.570427 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.570437 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.570450 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.570462 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:20Z","lastTransitionTime":"2026-02-16T11:07:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.672931 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.672997 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.673020 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.673051 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.673070 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:20Z","lastTransitionTime":"2026-02-16T11:07:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.775822 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.776066 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.776197 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.776319 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.776436 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:20Z","lastTransitionTime":"2026-02-16T11:07:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.800553 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" event={"ID":"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d","Type":"ContainerStarted","Data":"4ed370792765064d74485415cf3888e2c1a4abd34becb640eac8f30b2de77b19"} Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.801485 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.801592 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.801616 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.806885 4949 generic.go:334] "Generic (PLEG): container finished" podID="6ff1c788-4b66-48e9-8178-006f231d264c" containerID="8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd" exitCode=0 Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.806951 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" event={"ID":"6ff1c788-4b66-48e9-8178-006f231d264c","Type":"ContainerDied","Data":"8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd"} Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.829487 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:20Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.836012 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.839502 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.851141 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:20Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.864367 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"39ca5ab7-457c-4404-a3eb-f6acce74843b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d3e706e8ad75d6c7419dc41073b0842faf175f1e153735a12e3ee7243676253\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4978ec698681f01249736b54aa5a1fba37fc94f3667b735c99bdfb97d3bd3380\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-26lss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:20Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.878942 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.878974 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.878985 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.879003 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.879015 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:20Z","lastTransitionTime":"2026-02-16T11:07:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.881654 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ff1c788-4b66-48e9-8178-006f231d264c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:
07:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vjrxd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:20Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.894472 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kvrsd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e1092c7-896f-4334-b157-ac007cdb0d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a5d6a2719dc30352250a07c1028c552b8f73c26d421b869f2e8ddc74fb9dcd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8v7hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:15Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kvrsd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:20Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.912447 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aada2690-8d5f-4854-bc83-59906010e8ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with 
unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-16T11:07:11Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0216 11:07:05.819935 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0216 11:07:05.820709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2063297113/tls.crt::/tmp/serving-cert-2063297113/tls.key\\\\\\\"\\\\nI0216 11:07:11.242838 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0216 11:07:11.246739 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0216 11:07:11.246759 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0216 11:07:11.246780 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0216 11:07:11.246785 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0216 11:07:11.257100 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0216 11:07:11.257140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0216 11:07:11.257148 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0216 11:07:11.257148 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0216 11:07:11.257158 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0216 11:07:11.257195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0216 11:07:11.257203 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0216 11:07:11.257208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0216 11:07:11.259242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:20Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.930455 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"abcc0a5c-3dfa-48fe-9df1-4f941d9d811c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afad26039cce493d107df9286cf3268dfc5f76d20b86bb34a36ef7742b8419bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68519f3c9bfc45ad28f92e2cf0c28a9413821784aafd91ab65a311259ed6ecf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9f247fc563e96b2d5c1e866afe8eef00f61520018ad001e6b02cffac286d3ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2296e3c50624ffcfe10eb50ae71f715af1868bfa777c60068d3ef55a2544af00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:20Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.944844 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1997fe9cefd6bfcd634b77dad76d5d7c814318127f2be93b489d7866808c57ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:20Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.960356 4949 transport.go:147] "Certificate rotation detected, shutting down client connections to start using new credentials" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.960451 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:20Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.983493 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.983817 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.983826 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.983840 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.983849 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:20Z","lastTransitionTime":"2026-02-16T11:07:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.986060 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kxn9z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc3f701c-2094-4818-871c-547fc5636a55\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://993e593b3f31361b71cff339c39e1bc66b6fbbeb8a1d125976254ec852dcd919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rld7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kxn9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:20Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:20 crc kubenswrapper[4949]: I0216 11:07:20.998207 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jsmls" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e42a398-f83a-4463-9ab7-4e093e80d744\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3e0579c66283751490a28d6f93146aa4b7a000219c17747fe2041a778fe2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7q7gk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jsmls\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:20Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.013591 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://317e85af8628e3ae9d104c6faacfd4ce6b66adc7d3f0e426433f60118fffa894\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://43a9246b1c43b2bfb063c1d8aded507f620472bb98ffc99e91115e1189478807\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:21Z is after 
2025-08-24T17:21:41Z" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.024751 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eff651eda112458195ec8e6bc6ecdf362f44a44f7f8be136f631153251f278c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.043013 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers 
with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c16c3ec1c264807a71bba82352d13478aabcbae9eee409569b1de207fd89131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6b376f8bceb83918f978008daff9cea37a309102416bb94a209bf286a7fe3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e903800152f2a61249dd315080aec7aff0fb0f96a2d687f4903ee211ac061080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b36100d71aa9c4c492c12006bd25f19aa6f8e8b39048b6e895acbadf3a3d5b5c\\\",\\\"image\\\":\\\"quay.
io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6614345cd2b9b48d07439200086ab97d851abf34d3fea2b29a5073f224184ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c30142bc1aa2477b0821702607a40da4bd125f14ce8bdd4d6a555481f4be477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ed37079
2765064d74485415cf3888e2c1a4abd34becb640eac8f30b2de77b19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47947db986c5433367c6b52570a017def2e4babf72f785111587ca86fd745795\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gfr2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.057862 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.071304 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.083817 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"39ca5ab7-457c-4404-a3eb-f6acce74843b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d3e706e8ad75d6c7419dc41073b0842faf175f1e153735a12e3ee7243676253\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4978ec698681f01249736b54aa5a1fba37fc94f3667b735c99bdfb97d3bd3380\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-26lss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.086466 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.086490 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.086498 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.086512 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.086521 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:21Z","lastTransitionTime":"2026-02-16T11:07:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.100585 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ff1c788-4b66-48e9-8178-006f231d264c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7k
sw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:19Z\\\",\\\"reason\\\
":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vjrxd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.110633 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kvrsd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e1092c7-896f-4334-b157-ac007cdb0d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a5d6a2719dc30352250a07c1028c552b8f73c26d421b869f2e8ddc74fb9dcd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8v7hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:15Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kvrsd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.124255 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jsmls" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e42a398-f83a-4463-9ab7-4e093e80d744\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3e0579c66283751490a28d6f93146aa4b7a000219c17747fe2041a778fe2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7q7gk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jsmls\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.140281 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aada2690-8d5f-4854-bc83-59906010e8ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-16T11:07:11Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0216 11:07:05.819935 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0216 11:07:05.820709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2063297113/tls.crt::/tmp/serving-cert-2063297113/tls.key\\\\\\\"\\\\nI0216 11:07:11.242838 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0216 11:07:11.246739 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0216 11:07:11.246759 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0216 11:07:11.246780 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0216 11:07:11.246785 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0216 11:07:11.257100 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0216 11:07:11.257140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0216 11:07:11.257148 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0216 11:07:11.257148 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0216 11:07:11.257158 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0216 11:07:11.257195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0216 11:07:11.257203 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0216 11:07:11.257208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0216 11:07:11.259242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.156265 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"abcc0a5c-3dfa-48fe-9df1-4f941d9d811c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afad26039cce493d107df9286cf3268dfc5f76d20b86bb34a36ef7742b8419bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68519f3c9bfc45ad28f92e2cf0c28a9413821784aafd91ab65a311259ed6ecf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9f247fc563e96b2d5c1e866afe8eef00f61520018ad001e6b02cffac286d3ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2296e3c50624ffcfe10eb50ae71f715af1868bfa777c60068d3ef55a2544af00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.169850 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1997fe9cefd6bfcd634b77dad76d5d7c814318127f2be93b489d7866808c57ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.188098 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.189340 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.189359 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.189367 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.189379 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.189387 4949 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:21Z","lastTransitionTime":"2026-02-16T11:07:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.200995 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kxn9z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc3f701c-2094-4818-871c-547fc5636a55\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://993e593b3f31361b71cff339c39e1bc66b6fbbeb8a1d125976254ec852dcd919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rld7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kxn9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.205541 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-12 15:08:01.613031072 +0000 UTC Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.216165 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://317e85af8628e3ae9d104c6faacfd4ce6b66adc7d3f0e426433f60118fffa894\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://43a9246b1c43b2bfb063c1d8aded507f620472bb98ffc99e91115e1189478807\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.230520 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eff651eda112458195ec8e6bc6ecdf362f44a44f7f8be136f631153251f278c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.253141 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c16c3ec1c264807a71bba82352d13478aabcbae9eee409569b1de207fd89131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6b376f8bceb83918f978008daff9cea37a309102416bb94a209bf286a7fe3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e903800152f2a61249dd315080aec7aff0fb0f96a2d687f4903ee211ac061080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b36100d71aa9c4c492c12006bd25f19aa6f8e8b39048b6e895acbadf3a3d5b5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6614345cd2b9b48d07439200086ab97d851abf34d3fea2b29a5073f224184ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c30142bc1aa2477b0821702607a40da4bd125f14ce8bdd4d6a555481f4be477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ed370792765064d74485415cf3888e2c1a4abd3
4becb640eac8f30b2de77b19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47947db986c5433367c6b52570a017def2e4babf72f785111587ca86fd745795\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gfr2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.270995 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://317e85af8628e3ae9d104c6faacfd4ce6b66adc7d3f0e426433f60118fffa894\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://43a9246b1c43b2bfb063c1d8aded507f620472bb98ffc99e91115e1189478807\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.283075 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eff651eda112458195ec8e6bc6ecdf362f44a44f7f8be136f631153251f278c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.291949 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.291992 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.292003 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.292021 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.292032 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:21Z","lastTransitionTime":"2026-02-16T11:07:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.305278 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c16c3ec1c264807a71bba82352d13478aabcbae9eee409569b1de207fd89131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6b376f8bceb83918f978008daff9cea37a309102416bb94a209bf286a7fe3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://e903800152f2a61249dd315080aec7aff0fb0f96a2d687f4903ee211ac061080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b36100d71aa9c4c492c12006bd25f19aa6f8e8b39048b6e895acbadf3a3d5b5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6614345cd2b9b48d07439200086ab97d851abf34d3fea2b29a5073f224184ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c30142bc1aa2477b0821702607a40da4bd125f14ce8bdd4d6a555481f4be477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ed370792765064d74485415cf3888e2c1a4abd34becb640eac8f30b2de77b19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\
"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47947db986c5433367c6b52570a017def2e4babf72f785111587ca86fd745795\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gfr2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.318444 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.330477 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.340887 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"39ca5ab7-457c-4404-a3eb-f6acce74843b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d3e706e8ad75d6c7419dc41073b0842faf175f1e153735a12e3ee7243676253\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":
\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4978ec698681f01249736b54aa5a1fba37fc94f3667b735c99bdfb97d3bd3380\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-26lss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.354126 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ff1c788-4b66-48e9-8178-006f231d264c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vjrxd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.364159 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kvrsd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e1092c7-896f-4334-b157-ac007cdb0d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a5d6a2719dc30352250a07c1028c552b8f73c26d421b869f2e8ddc74fb9dcd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8v7hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:15Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kvrsd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.376317 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aada2690-8d5f-4854-bc83-59906010e8ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with 
unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-16T11:07:11Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0216 11:07:05.819935 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0216 11:07:05.820709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2063297113/tls.crt::/tmp/serving-cert-2063297113/tls.key\\\\\\\"\\\\nI0216 11:07:11.242838 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0216 11:07:11.246739 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0216 11:07:11.246759 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0216 11:07:11.246780 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0216 11:07:11.246785 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0216 11:07:11.257100 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0216 11:07:11.257140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0216 11:07:11.257148 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0216 11:07:11.257148 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0216 11:07:11.257158 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0216 11:07:11.257195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0216 11:07:11.257203 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0216 11:07:11.257208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0216 11:07:11.259242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.387891 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"abcc0a5c-3dfa-48fe-9df1-4f941d9d811c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afad26039cce493d107df9286cf3268dfc5f76d20b86bb34a36ef7742b8419bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68519f3c9bfc45ad28f92e2cf0c28a9413821784aafd91ab65a311259ed6ecf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9f247fc563e96b2d5c1e866afe8eef00f61520018ad001e6b02cffac286d3ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2296e3c50624ffcfe10eb50ae71f715af1868bfa777c60068d3ef55a2544af00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.394674 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.394724 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.394739 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.394760 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.394777 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:21Z","lastTransitionTime":"2026-02-16T11:07:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.400330 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1997fe9cefd6bfcd634b77dad76d5d7c814318127f2be93b489d7866808c57ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.432483 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.478947 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kxn9z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc3f701c-2094-4818-871c-547fc5636a55\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://993e593b3f31361b71cff339c39e1bc66b6fbbeb8a1d125976254ec852dcd919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rld7r\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kxn9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.497067 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.497097 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.497105 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.497119 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.497128 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:21Z","lastTransitionTime":"2026-02-16T11:07:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.512512 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jsmls" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e42a398-f83a-4463-9ab7-4e093e80d744\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3e0579c66283751490a28d6f93146aa4b7a000219c17747fe2041a778fe2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7q7gk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jsmls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.599795 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.599844 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.599856 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.599872 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.599884 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:21Z","lastTransitionTime":"2026-02-16T11:07:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.702315 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.702360 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.702368 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.702382 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.702398 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:21Z","lastTransitionTime":"2026-02-16T11:07:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.804794 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.804832 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.804844 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.804859 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.804870 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:21Z","lastTransitionTime":"2026-02-16T11:07:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.814318 4949 generic.go:334] "Generic (PLEG): container finished" podID="6ff1c788-4b66-48e9-8178-006f231d264c" containerID="04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf" exitCode=0 Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.814345 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" event={"ID":"6ff1c788-4b66-48e9-8178-006f231d264c","Type":"ContainerDied","Data":"04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf"} Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.832000 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aada2690-8d5f-4854-bc83-59906010e8ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-16T11:07:11Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0216 11:07:05.819935 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0216 11:07:05.820709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2063297113/tls.crt::/tmp/serving-cert-2063297113/tls.key\\\\\\\"\\\\nI0216 11:07:11.242838 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0216 11:07:11.246739 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0216 11:07:11.246759 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0216 11:07:11.246780 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0216 11:07:11.246785 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0216 11:07:11.257100 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0216 11:07:11.257140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0216 11:07:11.257148 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0216 11:07:11.257148 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0216 11:07:11.257158 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0216 11:07:11.257195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0216 11:07:11.257203 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0216 11:07:11.257208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0216 11:07:11.259242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.847940 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"abcc0a5c-3dfa-48fe-9df1-4f941d9d811c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afad26039cce493d107df9286cf3268dfc5f76d20b86bb34a36ef7742b8419bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68519f3c9bfc45ad28f92e2cf0c28a9413821784aafd91ab65a311259ed6ecf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9f247fc563e96b2d5c1e866afe8eef00f61520018ad001e6b02cffac286d3ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2296e3c50624ffcfe10eb50ae71f715af1868bfa777c60068d3ef55a2544af00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.861027 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1997fe9cefd6bfcd634b77dad76d5d7c814318127f2be93b489d7866808c57ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.873092 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.886329 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kxn9z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc3f701c-2094-4818-871c-547fc5636a55\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://993e593b3f31361b71cff339c39e1bc66b6fbbeb8a1d125976254ec852dcd919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rld7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kxn9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.899417 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jsmls" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e42a398-f83a-4463-9ab7-4e093e80d744\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3e0579c66283751490a28d6f93146aa4b7a000219c17747fe2041a778fe2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7q7gk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jsmls\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.907902 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.907943 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.907953 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.907970 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.907982 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:21Z","lastTransitionTime":"2026-02-16T11:07:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.916648 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://317e85af8628e3ae9d104c6faacfd4ce6b66adc7d3f0e426433f60118fffa894\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://43a9246b1c43b2bfb063c1d8aded507f620472bb98ffc99e91115e1189478807\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\
\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.928595 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eff651eda112458195ec8e6bc6ecdf362f44a44f7f8be136f631153251f278c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.950566 4949 
status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c16c3ec1c264807a71bba82352d13478aabcbae9eee409569b1de207fd89131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6b376f8bceb83918f978008daff9cea37a309102416bb94a209bf286a7fe3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e903800152f2a61249dd315080aec7aff0fb0f96a2d687f4903ee211ac061080\\\",\\\"image\\\":\\\"quay
.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b36100d71aa9c4c492c12006bd25f19aa6f8e8b39048b6e895acbadf3a3d5b5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6614345cd2b9b48d07439200086ab97d851abf34d3fea2b29a5073f224184ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c30142bc1aa2477b0821702607a40da4bd125f14ce8bdd4d6a555481f4be477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32
fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ed370792765064d74485415cf3888e2c1a4abd34becb640eac8f30b2de77b19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47947db986c5433367c6b52570a017def2e4babf72f785111587ca86fd745795\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gfr2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.965636 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:21 crc kubenswrapper[4949]: I0216 11:07:21.988472 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.001995 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"39ca5ab7-457c-4404-a3eb-f6acce74843b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d3e706e8ad75d6c7419dc41073b0842faf175f1e153735a12e3ee7243676253\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":
\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4978ec698681f01249736b54aa5a1fba37fc94f3667b735c99bdfb97d3bd3380\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-26lss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.010196 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.010233 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.010245 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.010262 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.010271 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:22Z","lastTransitionTime":"2026-02-16T11:07:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.038210 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ff1c788-4b66-48e9-8178-006f231d264c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7
275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\
\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vjrxd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:22Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.072080 4949 status_manager.go:875] "Failed to update status for 
pod" pod="openshift-image-registry/node-ca-kvrsd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e1092c7-896f-4334-b157-ac007cdb0d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a5d6a2719dc30352250a07c1028c552b8f73c26d421b869f2e8ddc74fb9dcd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8v7hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:15Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kvrsd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:22Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.112411 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.112454 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.112465 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.112481 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.112494 4949 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:22Z","lastTransitionTime":"2026-02-16T11:07:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.206677 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-22 09:51:00.04259381 +0000 UTC Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.214437 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.214492 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.214504 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.214522 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.214533 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:22Z","lastTransitionTime":"2026-02-16T11:07:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.234766 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.234789 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.234845 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:07:22 crc kubenswrapper[4949]: E0216 11:07:22.234891 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:07:22 crc kubenswrapper[4949]: E0216 11:07:22.235001 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:07:22 crc kubenswrapper[4949]: E0216 11:07:22.235096 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.317462 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.317537 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.317552 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.317571 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.317582 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:22Z","lastTransitionTime":"2026-02-16T11:07:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.419771 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.419814 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.419822 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.419836 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.419845 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:22Z","lastTransitionTime":"2026-02-16T11:07:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.522023 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.522053 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.522060 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.522073 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.522081 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:22Z","lastTransitionTime":"2026-02-16T11:07:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.624219 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.624244 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.624253 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.624266 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.624277 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:22Z","lastTransitionTime":"2026-02-16T11:07:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.726453 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.726486 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.726494 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.726508 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.726517 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:22Z","lastTransitionTime":"2026-02-16T11:07:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.820753 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" event={"ID":"6ff1c788-4b66-48e9-8178-006f231d264c","Type":"ContainerStarted","Data":"87041b3d12c1149f97ab42c0edec7c62aaee2e50309ad8c236b1842a6164bf79"} Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.828282 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.828335 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.828352 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.828378 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.828399 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:22Z","lastTransitionTime":"2026-02-16T11:07:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.841395 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:22Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.854232 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:22Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.864934 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"39ca5ab7-457c-4404-a3eb-f6acce74843b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d3e706e8ad75d6c7419dc41073b0842faf175f1e153735a12e3ee7243676253\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4978ec698681f01249736b54aa5a1fba37fc94f3667b735c99bdfb97d3bd3380\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-26lss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:22Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.880321 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ff1c788-4b66-48e9-8178-006f231d264c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87041b3d12c1149f97ab42c0edec7c62aaee2e50309ad8c236b1842a6164bf79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\
"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount
\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\
"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vjrxd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:22Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.890552 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kvrsd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e1092c7-896f-4334-b157-ac007cdb0d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a5d6a2719dc30352250a07c1028c552b8f73c26d421b869f2e8ddc74fb9dcd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8v7hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:15Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kvrsd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current 
time 2026-02-16T11:07:22Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.908548 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1997fe9cefd6bfcd634b77dad76d5d7c814318127f2be93b489d7866808c57ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:22Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.920212 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:22Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.930642 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kxn9z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc3f701c-2094-4818-871c-547fc5636a55\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://993e593b3f31361b71cff339c39e1bc66b6fbbeb8a1d125976254ec852dcd919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rld7r\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kxn9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:22Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.931244 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.931282 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.931292 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.931308 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.931319 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:22Z","lastTransitionTime":"2026-02-16T11:07:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.947382 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jsmls" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e42a398-f83a-4463-9ab7-4e093e80d744\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3e0579c66283751490a28d6f93146aa4b7a000219c17747fe2041a778fe2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7q7gk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jsmls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:22Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.962365 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aada2690-8d5f-4854-bc83-59906010e8ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6
e740d5b0410dbcd2fc809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-16T11:07:11Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0216 11:07:05.819935 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0216 11:07:05.820709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2063297113/tls.crt::/tmp/serving-cert-2063297113/tls.key\\\\\\\"\\\\nI0216 11:07:11.242838 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0216 11:07:11.246739 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0216 11:07:11.246759 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0216 11:07:11.246780 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0216 11:07:11.246785 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0216 11:07:11.257100 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0216 11:07:11.257140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0216 11:07:11.257148 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0216 11:07:11.257148 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0216 11:07:11.257158 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0216 11:07:11.257195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0216 11:07:11.257203 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0216 11:07:11.257208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0216 11:07:11.259242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:22Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.976557 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"abcc0a5c-3dfa-48fe-9df1-4f941d9d811c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afad26039cce493d107df9286cf3268dfc5f76d20b86bb34a36ef7742b8419bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68519f3c9bfc45ad28f92e2cf0c28a9413821784aafd91ab65a311259ed6ecf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9f247fc563e96b2d5c1e866afe8eef00f61520018ad001e6b02cffac286d3ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2296e3c50624ffcfe10eb50ae71f715af1868bfa777c60068d3ef55a2544af00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:22Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:22 crc kubenswrapper[4949]: I0216 11:07:22.993525 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eff651eda112458195ec8e6bc6ecdf362f44a44f7f8be136f631153251f278c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-02-16T11:07:22Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.013482 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c16c3ec1c264807a71bba82352d13478aabcbae9eee409569b1de207fd89131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6b376f8bceb83918f978008daff9cea37a309102416bb94a209bf286a7fe3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"
recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e903800152f2a61249dd315080aec7aff0fb0f96a2d687f4903ee211ac061080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b36100d71aa9c4c492c12006bd25f19aa6f8e8b39048b6e895acbadf3a3d5b5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6614345cd2b9b48d07439200086ab97d851abf34d3fea2b29a5073f224184ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c30142bc1aa2477b0821702607a40da4bd125f14ce8bdd4d6a555481f4be477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257
453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ed370792765064d74485415cf3888e2c1a4abd34becb640eac8f30b2de77b19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\
\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47947db986c5433367c6b52570a017def2e4babf72f785111587ca86fd745795\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gfr2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:23Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.027728 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://317e85af8628e3ae9d104c6faacfd4ce6b66adc7d3f0e426433f60118fffa894\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://43a9246b1c43b2bfb063c1d8aded507f620472bb98ffc99e91115e1189478807\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:23Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.033788 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.033857 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.033875 4949 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.033901 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.033921 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:23Z","lastTransitionTime":"2026-02-16T11:07:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.136334 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.136388 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.136405 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.136427 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.136443 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:23Z","lastTransitionTime":"2026-02-16T11:07:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.207883 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-09 01:42:53.433236052 +0000 UTC Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.238823 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.238877 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.238891 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.238912 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.238927 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:23Z","lastTransitionTime":"2026-02-16T11:07:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.342369 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.342447 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.342464 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.342485 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.342502 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:23Z","lastTransitionTime":"2026-02-16T11:07:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.444460 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.444512 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.444528 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.444551 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.444568 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:23Z","lastTransitionTime":"2026-02-16T11:07:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.546865 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.546911 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.546923 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.546937 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.546946 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:23Z","lastTransitionTime":"2026-02-16T11:07:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.649843 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.649874 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.649882 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.649894 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.649904 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:23Z","lastTransitionTime":"2026-02-16T11:07:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.753309 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.753382 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.753408 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.753440 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.753462 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:23Z","lastTransitionTime":"2026-02-16T11:07:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.824294 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gfr2q_3f545ae8-1b14-4abd-b4ea-844f6ae7b54d/ovnkube-controller/0.log" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.825875 4949 generic.go:334] "Generic (PLEG): container finished" podID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerID="4ed370792765064d74485415cf3888e2c1a4abd34becb640eac8f30b2de77b19" exitCode=1 Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.825905 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" event={"ID":"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d","Type":"ContainerDied","Data":"4ed370792765064d74485415cf3888e2c1a4abd34becb640eac8f30b2de77b19"} Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.826514 4949 scope.go:117] "RemoveContainer" containerID="4ed370792765064d74485415cf3888e2c1a4abd34becb640eac8f30b2de77b19" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.841649 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:23Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.857454 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.857513 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.857526 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.857549 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.857581 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:23Z","lastTransitionTime":"2026-02-16T11:07:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.859952 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:23Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.874684 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"39ca5ab7-457c-4404-a3eb-f6acce74843b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d3e706e8ad75d6c7419dc41073b0842faf175f1e153735a12e3ee7243676253\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4978ec698681f01249736b54aa5a1fba37fc94f3667b735c99bdfb97d3bd3380\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-26lss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:23Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.890060 4949 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ff1c788-4b66-48e9-8178-006f231d264c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87041b3d12c1149f97ab42c0edec7c62aaee2e50309ad8c236b1842a6164bf79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vjrxd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:23Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.902364 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kvrsd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e1092c7-896f-4334-b157-ac007cdb0d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a5d6a2719dc30352250a07c1028c552b8f73c26d421b869f2e8ddc74fb9dcd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8v7hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:15Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kvrsd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:23Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.921677 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jsmls" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e42a398-f83a-4463-9ab7-4e093e80d744\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3e0579c66283751490a28d6f93146aa4b7a000219c17747fe2041a778fe2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7q7gk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jsmls\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:23Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.936812 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aada2690-8d5f-4854-bc83-59906010e8ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-16T11:07:11Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0216 11:07:05.819935 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0216 11:07:05.820709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2063297113/tls.crt::/tmp/serving-cert-2063297113/tls.key\\\\\\\"\\\\nI0216 11:07:11.242838 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0216 11:07:11.246739 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0216 11:07:11.246759 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0216 11:07:11.246780 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0216 11:07:11.246785 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0216 11:07:11.257100 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0216 11:07:11.257140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0216 11:07:11.257148 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0216 11:07:11.257148 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0216 11:07:11.257158 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0216 11:07:11.257195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0216 11:07:11.257203 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0216 11:07:11.257208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0216 11:07:11.259242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:23Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.950736 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"abcc0a5c-3dfa-48fe-9df1-4f941d9d811c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afad26039cce493d107df9286cf3268dfc5f76d20b86bb34a36ef7742b8419bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68519f3c9bfc45ad28f92e2cf0c28a9413821784aafd91ab65a311259ed6ecf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9f247fc563e96b2d5c1e866afe8eef00f61520018ad001e6b02cffac286d3ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2296e3c50624ffcfe10eb50ae71f715af1868bfa777c60068d3ef55a2544af00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:23Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.960311 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.960362 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.960381 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.960411 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.960433 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:23Z","lastTransitionTime":"2026-02-16T11:07:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.965705 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1997fe9cefd6bfcd634b77dad76d5d7c814318127f2be93b489d7866808c57ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:23Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.980682 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:23Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:23 crc kubenswrapper[4949]: I0216 11:07:23.991899 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kxn9z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc3f701c-2094-4818-871c-547fc5636a55\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://993e593b3f31361b71cff339c39e1bc66b6fbbeb8a1d125976254ec852dcd919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rld7r\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kxn9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:23Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.006075 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://317e85af8628e3ae9d104c6faacfd4ce6b66adc7d3f0e426433f60118fffa894\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://43a9246b1c43b2bfb063c1d8aded507f620472bb98ffc99e91115e1189478807\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disable
d\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:24Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.017976 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eff651eda112458195ec8e6bc6ecdf362f44a44f7f8be136f631153251f278c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:24Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.039016 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c16c3ec1c264807a71bba82352d13478aabcbae9eee409569b1de207fd89131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6b376f8bceb83918f978008daff9cea37a309102416bb94a209bf286a7fe3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e903800152f2a61249dd315080aec7aff0fb0f96a2d687f4903ee211ac061080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b36100d71aa9c4c492c12006bd25f19aa6f8e8b39048b6e895acbadf3a3d5b5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6614345cd2b9b48d07439200086ab97d851abf34d3fea2b29a5073f224184ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c30142bc1aa2477b0821702607a40da4bd125f14ce8bdd4d6a555481f4be477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ed370792765064d74485415cf3888e2c1a4abd34becb640eac8f30b2de77b19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed370792765064d74485415cf3888e2c1a4abd34becb640eac8f30b2de77b19\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"message\\\":\\\"3 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0216 11:07:22.792636 6173 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0216 11:07:22.792642 6173 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0216 11:07:22.792666 6173 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0216 11:07:22.792671 6173 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0216 11:07:22.792697 6173 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0216 11:07:22.792709 6173 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0216 11:07:22.792745 6173 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0216 11:07:22.792752 6173 handler.go:208] Removed *v1.Node event handler 2\\\\nI0216 11:07:22.792764 6173 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0216 11:07:22.792773 6173 handler.go:208] Removed *v1.Node event handler 7\\\\nI0216 11:07:22.792787 6173 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0216 11:07:22.792799 6173 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0216 11:07:22.793125 6173 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0216 11:07:22.793198 6173 factory.go:656] Stopping watch factory\\\\nI0216 11:07:22.793224 6173 handler.go:208] Removed *v1.NetworkPolicy 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47947db986c5433367c6b52570a017def2e4babf72f785111587ca86fd745795\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gfr2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:24Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.063241 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.063357 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.063384 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.063414 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.063437 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:24Z","lastTransitionTime":"2026-02-16T11:07:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
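Every "Failed to update status for pod" entry above fails the same way: the kubelet's status PATCH is intercepted by the pod.network-node-identity.openshift.io admission webhook on https://127.0.0.1:9743, and that webhook's serving certificate (mounted at /etc/webhook-cert/ in the network-node-identity-vrzqb pod, per the volumeMounts above) expired on 2025-08-24T17:21:41Z, while the node's clock reads 2026-02-16. A minimal Go sketch to confirm the expiry from the node itself; the address is taken from the log, everything else (file name, output format) is illustrative, and InsecureSkipVerify is used deliberately so the handshake succeeds long enough to read the expired leaf:

```go
// cert_probe.go — diagnostic sketch, not OpenShift tooling.
package main

import (
	"crypto/tls"
	"fmt"
	"time"
)

func main() {
	const addr = "127.0.0.1:9743" // webhook endpoint quoted in the errors above

	// Skip verification so the handshake completes even though the
	// certificate is expired; we only want to inspect the leaf.
	conn, err := tls.Dial("tcp", addr, &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()

	cert := conn.ConnectionState().PeerCertificates[0]
	fmt.Printf("subject:   %s\n", cert.Subject)
	fmt.Printf("notBefore: %s\n", cert.NotBefore.Format(time.RFC3339))
	fmt.Printf("notAfter:  %s\n", cert.NotAfter.Format(time.RFC3339))
	if now := time.Now(); now.After(cert.NotAfter) {
		fmt.Printf("expired %s ago\n", now.Sub(cert.NotAfter).Round(time.Hour))
	}
}
```

Against the timestamps in these entries it would report the certificate as roughly six months past its notAfter, which is why every status patch is rejected before it reaches the API server.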
Has your network provider started?"} Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.168321 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.168433 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.168459 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.168532 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.168559 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:24Z","lastTransitionTime":"2026-02-16T11:07:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.208840 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-16 22:20:52.984242801 +0000 UTC Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.234440 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.234555 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:07:24 crc kubenswrapper[4949]: E0216 11:07:24.234593 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.234684 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:07:24 crc kubenswrapper[4949]: E0216 11:07:24.234779 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:07:24 crc kubenswrapper[4949]: E0216 11:07:24.234875 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.271100 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.271146 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.271158 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.271187 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.271200 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:24Z","lastTransitionTime":"2026-02-16T11:07:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.373759 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.373800 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.373808 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.373821 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.373829 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:24Z","lastTransitionTime":"2026-02-16T11:07:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.475948 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.476024 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.476040 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.476059 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.476448 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:24Z","lastTransitionTime":"2026-02-16T11:07:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.578871 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.578943 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.578956 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.578997 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.579011 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:24Z","lastTransitionTime":"2026-02-16T11:07:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.681040 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.681083 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.681091 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.681105 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.681115 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:24Z","lastTransitionTime":"2026-02-16T11:07:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
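The NodeNotReady heartbeats repeating above all hinge on one gate: the container runtime reports NetworkReady=false until a CNI configuration file exists in /etc/kubernetes/cni/net.d/, and the component that writes it, ovnkube-controller, exited with code 1 earlier in this log and (a few entries below) lands in CrashLoopBackOff, so the gate never clears. A rough sketch of that check, assuming, as the message implies, that the presence of a .conf/.conflist/.json file is what flips the condition; the authoritative logic lives in the container runtime (CRI-O), not in this snippet:

```go
// cni_gate.go — illustrative approximation of the NetworkReady gate.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	const confDir = "/etc/kubernetes/cni/net.d" // directory quoted verbatim above

	entries, err := os.ReadDir(confDir)
	if err != nil {
		fmt.Println("NetworkReady=false:", err)
		return
	}
	for _, e := range entries {
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			fmt.Println("NetworkReady=true, config:", e.Name())
			return
		}
	}
	fmt.Println("NetworkReady=false: no CNI configuration file found")
}
```

Until that directory is populated, the kubelet keeps re-recording the same NodeHasSufficientMemory/NodeHasNoDiskPressure/NodeHasSufficientPID/NodeNotReady event set on every status-update cycle, which is exactly the rhythm visible above.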
Has your network provider started?"} Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.783723 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.783796 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.783820 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.783850 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.783871 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:24Z","lastTransitionTime":"2026-02-16T11:07:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.832067 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gfr2q_3f545ae8-1b14-4abd-b4ea-844f6ae7b54d/ovnkube-controller/1.log" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.832913 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gfr2q_3f545ae8-1b14-4abd-b4ea-844f6ae7b54d/ovnkube-controller/0.log" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.836970 4949 generic.go:334] "Generic (PLEG): container finished" podID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerID="b29470aea2f9de283595ee2dba6e70e842f131de5d8a942668a6a23f3caa9751" exitCode=1 Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.837023 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" event={"ID":"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d","Type":"ContainerDied","Data":"b29470aea2f9de283595ee2dba6e70e842f131de5d8a942668a6a23f3caa9751"} Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.837079 4949 scope.go:117] "RemoveContainer" containerID="4ed370792765064d74485415cf3888e2c1a4abd34becb640eac8f30b2de77b19" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.837933 4949 scope.go:117] "RemoveContainer" containerID="b29470aea2f9de283595ee2dba6e70e842f131de5d8a942668a6a23f3caa9751" Feb 16 11:07:24 crc kubenswrapper[4949]: E0216 11:07:24.838118 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-gfr2q_openshift-ovn-kubernetes(3f545ae8-1b14-4abd-b4ea-844f6ae7b54d)\"" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.859722 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ff1c788-4b66-48e9-8178-006f231d264c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87041b3d12c1149f97ab42c0edec7c62aaee2e50309ad8c236b1842a6164bf79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vjrxd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:24Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.870738 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kvrsd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e1092c7-896f-4334-b157-ac007cdb0d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a5d6a2719dc30352250a07c1028c552b8f73c26d421b869f2e8ddc74fb9dcd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8v7hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:15Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kvrsd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:24Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.881371 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kxn9z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc3f701c-2094-4818-871c-547fc5636a55\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://993e593b3f31361b71cff339c39e1bc66b6fbbeb8a1d125976254ec852dcd919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rld7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kxn9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:24Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.885784 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.886013 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.886083 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.886200 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.886281 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:24Z","lastTransitionTime":"2026-02-16T11:07:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.891996 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jsmls" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e42a398-f83a-4463-9ab7-4e093e80d744\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3e0579c66283751490a28d6f93146aa4b7a000219c17747fe2041a778fe2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7q7gk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\
\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jsmls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:24Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.903214 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aada2690-8d5f-4854-bc83-59906010e8ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\
":\\\"cri-o://b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-16T11:07:11Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0216 11:07:05.819935 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0216 11:07:05.820709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2063297113/tls.crt::/tmp/serving-cert-2063297113/tls.key\\\\\\\"\\\\nI0216 11:07:11.242838 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0216 11:07:11.246739 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0216 11:07:11.246759 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0216 11:07:11.246780 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0216 11:07:11.246785 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0216 11:07:11.257100 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0216 11:07:11.257140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0216 11:07:11.257148 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0216 11:07:11.257148 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0216 11:07:11.257158 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0216 11:07:11.257195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0216 11:07:11.257203 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0216 11:07:11.257208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0216 11:07:11.259242 1 cmd.go:182] pods 
\\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:24Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.916156 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"abcc0a5c-3dfa-48fe-9df1-4f941d9d811c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afad26039cce493d107df9286cf3268dfc5f76d20b86bb34a36ef7742b8419bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68519f3c9bfc45ad28f92e2cf0c28a9413821784aafd91ab65a311259ed6ecf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9f247fc563e96b2d5c1e866afe8eef00f61520018ad001e6b02cffac286d3ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2296e3c50624ffcfe10eb50ae71f715af1868bfa777c60068d3ef55a2544af00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:24Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.927260 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1997fe9cefd6bfcd634b77dad76d5d7c814318127f2be93b489d7866808c57ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:24Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.936756 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:24Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.946676 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://317e85af8628e3ae9d104c6faacfd4ce6b66adc7d3f0e426433f60118fffa894\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://43a9246b1c43b2bfb063c1d8aded507f620472bb98ffc99e91115e1189478807\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:24Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.955474 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eff651eda112458195ec8e6bc6ecdf362f44a44f7f8be136f631153251f278c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:24Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.970496 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c16c3ec1c264807a71bba82352d13478aabcbae9eee409569b1de207fd89131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6b376f8bceb83918f978008daff9cea37a309102416bb94a209bf286a7fe3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e903800152f2a61249dd315080aec7aff0fb0f96a2d687f4903ee211ac061080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b36100d71aa9c4c492c12006bd25f19aa6f8e8b39048b6e895acbadf3a3d5b5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6614345cd2b9b48d07439200086ab97d851abf34d3fea2b29a5073f224184ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c30142bc1aa2477b0821702607a40da4bd125f14ce8bdd4d6a555481f4be477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b29470aea2f9de283595ee2dba6e70e842f131de
5d8a942668a6a23f3caa9751\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed370792765064d74485415cf3888e2c1a4abd34becb640eac8f30b2de77b19\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"message\\\":\\\"3 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0216 11:07:22.792636 6173 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0216 11:07:22.792642 6173 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0216 11:07:22.792666 6173 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0216 11:07:22.792671 6173 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0216 11:07:22.792697 6173 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0216 11:07:22.792709 6173 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0216 11:07:22.792745 6173 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0216 11:07:22.792752 6173 handler.go:208] Removed *v1.Node event handler 2\\\\nI0216 11:07:22.792764 6173 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0216 11:07:22.792773 6173 handler.go:208] Removed *v1.Node event handler 7\\\\nI0216 11:07:22.792787 6173 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0216 11:07:22.792799 6173 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0216 11:07:22.793125 6173 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0216 11:07:22.793198 6173 factory.go:656] Stopping watch factory\\\\nI0216 11:07:22.793224 6173 handler.go:208] Removed *v1.NetworkPolicy ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:19Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b29470aea2f9de283595ee2dba6e70e842f131de5d8a942668a6a23f3caa9751\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-16T11:07:24Z\\\",\\\"message\\\":\\\"0216 11:07:24.662034 6375 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:24Z is after 2025-08-24T17:21:41Z]\\\\nI0216 11:07:24.661977 6375 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-ingress-canary/ingress-canary\\\\\\\"}\\\\nI0216 11:07:24.662021 6375 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer 
Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-cluster-version/cluster-version-operator]} name:Service_openshift-cluster-version/cluster-version-operator_TCP_cluster options:{GoMap:map[event:false h\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47947db986c5433367c6b52570a017def2e4babf72f785111587ca86fd745795\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126
.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gfr2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:24Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.979525 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:24Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.988818 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.988859 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.988878 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.988902 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.988913 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:24Z","lastTransitionTime":"2026-02-16T11:07:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:24 crc kubenswrapper[4949]: I0216 11:07:24.991157 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:24Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.000479 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"39ca5ab7-457c-4404-a3eb-f6acce74843b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d3e706e8ad75d6c7419dc41073b0842faf175f1e153735a12e3ee7243676253\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4978ec698681f01249736b54aa5a1fba37fc94f3667b735c99bdfb97d3bd3380\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-26lss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:24Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.092479 4949 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.092518 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.092534 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.092549 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.092559 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:25Z","lastTransitionTime":"2026-02-16T11:07:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.195874 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.195949 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.195984 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.196016 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.196039 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:25Z","lastTransitionTime":"2026-02-16T11:07:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.209905 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-05 06:38:15.303007806 +0000 UTC Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.286401 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cs472"] Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.287016 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cs472" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.290430 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.290756 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.298544 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.298608 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.298627 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.298651 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.298669 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:25Z","lastTransitionTime":"2026-02-16T11:07:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.308157 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:25Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.319988 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:25Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.332337 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"39ca5ab7-457c-4404-a3eb-f6acce74843b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d3e706e8ad75d6c7419dc41073b0842faf175f1e153735a12e3ee7243676253\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4978ec698681f01249736b54aa5a1fba37fc94f3667b735c99bdfb97d3bd3380\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-26lss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:25Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.343613 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cs472" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75391841-4aa8-4f03-b696-23ac283642c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bql55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bql55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cs472\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:25Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.359628 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ff1c788-4b66-48e9-8178-006f231d264c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87041b3d12c1149f97ab42c0edec7c62aaee2e50309ad8c236b1842a6164bf79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vjrxd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:25Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.369678 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/75391841-4aa8-4f03-b696-23ac283642c4-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-cs472\" (UID: 
\"75391841-4aa8-4f03-b696-23ac283642c4\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cs472" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.369725 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/75391841-4aa8-4f03-b696-23ac283642c4-env-overrides\") pod \"ovnkube-control-plane-749d76644c-cs472\" (UID: \"75391841-4aa8-4f03-b696-23ac283642c4\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cs472" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.369765 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/75391841-4aa8-4f03-b696-23ac283642c4-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-cs472\" (UID: \"75391841-4aa8-4f03-b696-23ac283642c4\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cs472" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.369806 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bql55\" (UniqueName: \"kubernetes.io/projected/75391841-4aa8-4f03-b696-23ac283642c4-kube-api-access-bql55\") pod \"ovnkube-control-plane-749d76644c-cs472\" (UID: \"75391841-4aa8-4f03-b696-23ac283642c4\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cs472" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.372351 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kvrsd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e1092c7-896f-4334-b157-ac007cdb0d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a5d6a2719dc30352250a07c1028c552b8f73c26d421b869f2e8ddc74fb9dcd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8v7hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\
\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:15Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kvrsd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:25Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.387576 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jsmls" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e42a398-f83a-4463-9ab7-4e093e80d744\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3e0579c66283751490a28d6f93146aa4b7a000219c17747fe2041a778fe2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"mult
us-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7q7gk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jsmls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:25Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.401024 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.401084 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.401100 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.401123 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.401140 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:25Z","lastTransitionTime":"2026-02-16T11:07:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.403680 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aada2690-8d5f-4854-bc83-59906010e8ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-16T11:07:11Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0216 11:07:05.819935 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0216 11:07:05.820709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2063297113/tls.crt::/tmp/serving-cert-2063297113/tls.key\\\\\\\"\\\\nI0216 11:07:11.242838 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0216 11:07:11.246739 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0216 11:07:11.246759 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0216 11:07:11.246780 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0216 11:07:11.246785 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0216 11:07:11.257100 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0216 11:07:11.257140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0216 11:07:11.257148 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0216 11:07:11.257148 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0216 11:07:11.257158 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0216 11:07:11.257195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0216 11:07:11.257203 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0216 11:07:11.257208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0216 11:07:11.259242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:25Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.420347 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"abcc0a5c-3dfa-48fe-9df1-4f941d9d811c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afad26039cce493d107df9286cf3268dfc5f76d20b86bb34a36ef7742b8419bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68519f3c9bfc45ad28f92e2cf0c28a9413821784aafd91ab65a311259ed6ecf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9f247fc563e96b2d5c1e866afe8eef00f61520018ad001e6b02cffac286d3ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2296e3c50624ffcfe10eb50ae71f715af1868bfa777c60068d3ef55a2544af00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:25Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.438597 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1997fe9cefd6bfcd634b77dad76d5d7c814318127f2be93b489d7866808c57ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:25Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.450135 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:25Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.462982 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kxn9z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc3f701c-2094-4818-871c-547fc5636a55\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://993e593b3f31361b71cff339c39e1bc66b6fbbeb8a1d125976254ec852dcd919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rld7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kxn9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:25Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.471087 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bql55\" (UniqueName: \"kubernetes.io/projected/75391841-4aa8-4f03-b696-23ac283642c4-kube-api-access-bql55\") pod \"ovnkube-control-plane-749d76644c-cs472\" (UID: \"75391841-4aa8-4f03-b696-23ac283642c4\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cs472" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.471152 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/75391841-4aa8-4f03-b696-23ac283642c4-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-cs472\" (UID: \"75391841-4aa8-4f03-b696-23ac283642c4\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cs472" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.471201 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: 
\"kubernetes.io/configmap/75391841-4aa8-4f03-b696-23ac283642c4-env-overrides\") pod \"ovnkube-control-plane-749d76644c-cs472\" (UID: \"75391841-4aa8-4f03-b696-23ac283642c4\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cs472" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.471239 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/75391841-4aa8-4f03-b696-23ac283642c4-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-cs472\" (UID: \"75391841-4aa8-4f03-b696-23ac283642c4\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cs472" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.471912 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/75391841-4aa8-4f03-b696-23ac283642c4-env-overrides\") pod \"ovnkube-control-plane-749d76644c-cs472\" (UID: \"75391841-4aa8-4f03-b696-23ac283642c4\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cs472" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.471983 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/75391841-4aa8-4f03-b696-23ac283642c4-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-cs472\" (UID: \"75391841-4aa8-4f03-b696-23ac283642c4\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cs472" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.474712 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://317e85af8628e3ae9d104c6faacfd4ce6b66adc7d3f0e426433f60118fffa894\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://43a9246b1c43b2bfb063c1d8aded507f620472bb98ffc99e91115e1189478807\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20994
82919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:25Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.477894 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/75391841-4aa8-4f03-b696-23ac283642c4-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-cs472\" (UID: \"75391841-4aa8-4f03-b696-23ac283642c4\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cs472" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.485611 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bql55\" (UniqueName: \"kubernetes.io/projected/75391841-4aa8-4f03-b696-23ac283642c4-kube-api-access-bql55\") pod \"ovnkube-control-plane-749d76644c-cs472\" (UID: \"75391841-4aa8-4f03-b696-23ac283642c4\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cs472" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.486622 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
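The status-patch failures above and below all share one root cause, visible at the end of each entry: the node-identity webhook's serving certificate expired on 2025-08-24 while the node clock reads 2026-02-16, so every TLS handshake to https://127.0.0.1:9743 is rejected and the kubelet cannot persist pod status. A minimal Go sketch of the same validity-window check the TLS stack performs; the certificate path is hypothetical and stands in for wherever the webhook's serving certificate is mounted:

    // certcheck.go - a sketch of the x509 validity check behind the
    // "certificate has expired or is not yet valid" errors in this log.
    package main

    import (
        "crypto/x509"
        "encoding/pem"
        "fmt"
        "os"
        "time"
    )

    func main() {
        // Hypothetical path; point it at the webhook's serving certificate.
        data, err := os.ReadFile("/etc/webhook-cert/tls.crt")
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        block, _ := pem.Decode(data)
        if block == nil {
            fmt.Fprintln(os.Stderr, "no PEM block found")
            os.Exit(1)
        }
        cert, err := x509.ParseCertificate(block.Bytes)
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        now := time.Now()
        // The handshake fails whenever now falls outside [NotBefore, NotAfter],
        // which is exactly the comparison quoted in the log message.
        switch {
        case now.Before(cert.NotBefore):
            fmt.Printf("certificate not yet valid: current time %s is before %s\n",
                now.UTC().Format(time.RFC3339), cert.NotBefore.UTC().Format(time.RFC3339))
        case now.After(cert.NotAfter):
            fmt.Printf("certificate has expired: current time %s is after %s\n",
                now.UTC().Format(time.RFC3339), cert.NotAfter.UTC().Format(time.RFC3339))
        default:
            fmt.Printf("certificate valid until %s\n", cert.NotAfter.UTC().Format(time.RFC3339))
        }
    }

Note that the check is purely a clock comparison: the same handshake would succeed if the node clock were rolled back inside the validity window, which is why expired-certificate errors often surface only after a cluster has been suspended and resumed.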
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eff651eda112458195ec8e6bc6ecdf362f44a44f7f8be136f631153251f278c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:25Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.502427 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c16c3ec1c264807a71bba82352d13478aabcbae9eee409569b1de207fd89131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6b376f8bceb83918f978008daff9cea37a309102416bb94a209bf286a7fe3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e903800152f2a61249dd315080aec7aff0fb0f96a2d687f4903ee211ac061080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b36100d71aa9c4c492c12006bd25f19aa6f8e8b39048b6e895acbadf3a3d5b5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6614345cd2b9b48d07439200086ab97d851abf34d3fea2b29a5073f224184ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c30142bc1aa2477b0821702607a40da4bd125f14ce8bdd4d6a555481f4be477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b29470aea2f9de283595ee2dba6e70e842f131de
5d8a942668a6a23f3caa9751\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed370792765064d74485415cf3888e2c1a4abd34becb640eac8f30b2de77b19\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"message\\\":\\\"3 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0216 11:07:22.792636 6173 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0216 11:07:22.792642 6173 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0216 11:07:22.792666 6173 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0216 11:07:22.792671 6173 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0216 11:07:22.792697 6173 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0216 11:07:22.792709 6173 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0216 11:07:22.792745 6173 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0216 11:07:22.792752 6173 handler.go:208] Removed *v1.Node event handler 2\\\\nI0216 11:07:22.792764 6173 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0216 11:07:22.792773 6173 handler.go:208] Removed *v1.Node event handler 7\\\\nI0216 11:07:22.792787 6173 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0216 11:07:22.792799 6173 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0216 11:07:22.793125 6173 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0216 11:07:22.793198 6173 factory.go:656] Stopping watch factory\\\\nI0216 11:07:22.793224 6173 handler.go:208] Removed *v1.NetworkPolicy ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:19Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b29470aea2f9de283595ee2dba6e70e842f131de5d8a942668a6a23f3caa9751\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-16T11:07:24Z\\\",\\\"message\\\":\\\"0216 11:07:24.662034 6375 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:24Z is after 2025-08-24T17:21:41Z]\\\\nI0216 11:07:24.661977 6375 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-ingress-canary/ingress-canary\\\\\\\"}\\\\nI0216 11:07:24.662021 6375 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer 
Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-cluster-version/cluster-version-operator]} name:Service_openshift-cluster-version/cluster-version-operator_TCP_cluster options:{GoMap:map[event:false h\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47947db986c5433367c6b52570a017def2e4babf72f785111587ca86fd745795\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126
.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gfr2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:25Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.503136 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.503206 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.503222 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.503245 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.503261 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:25Z","lastTransitionTime":"2026-02-16T11:07:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.603902 4949 util.go:30] "No sandbox for pod can be found. 
Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.605341 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.605371 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.605380 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.605395 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.605407 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:25Z","lastTransitionTime":"2026-02-16T11:07:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Feb 16 11:07:25 crc kubenswrapper[4949]: W0216 11:07:25.617257 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod75391841_4aa8_4f03_b696_23ac283642c4.slice/crio-d38329669c540a32c8865d64249b59dece9aa302e351d5656b0d6ecbe38fcbb5 WatchSource:0}: Error finding container d38329669c540a32c8865d64249b59dece9aa302e351d5656b0d6ecbe38fcbb5: Status 404 returned error can't find the container with id d38329669c540a32c8865d64249b59dece9aa302e351d5656b0d6ecbe38fcbb5
Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.707941 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.707970 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.707979 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.707991 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.708000 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:25Z","lastTransitionTime":"2026-02-16T11:07:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.810880 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.810917 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.810927 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.810941 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.810951 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:25Z","lastTransitionTime":"2026-02-16T11:07:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.841113 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cs472" event={"ID":"75391841-4aa8-4f03-b696-23ac283642c4","Type":"ContainerStarted","Data":"d38329669c540a32c8865d64249b59dece9aa302e351d5656b0d6ecbe38fcbb5"}
Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.842628 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gfr2q_3f545ae8-1b14-4abd-b4ea-844f6ae7b54d/ovnkube-controller/1.log"
Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.913020 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.913062 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.913077 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.913096 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Feb 16 11:07:25 crc kubenswrapper[4949]: I0216 11:07:25.913109 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:25Z","lastTransitionTime":"2026-02-16T11:07:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.015881 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.015955 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.015981 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.016015 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.016055 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:26Z","lastTransitionTime":"2026-02-16T11:07:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.118488 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.118543 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.118556 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.118575 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.118600 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:26Z","lastTransitionTime":"2026-02-16T11:07:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.210025 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-15 00:42:58.695957772 +0000 UTC
Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.222297 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.222337 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.222350 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.222370 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.222385 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:26Z","lastTransitionTime":"2026-02-16T11:07:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.234240 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.234285 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Feb 16 11:07:26 crc kubenswrapper[4949]: E0216 11:07:26.234414 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.234526 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Feb 16 11:07:26 crc kubenswrapper[4949]: E0216 11:07:26.234754 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Feb 16 11:07:26 crc kubenswrapper[4949]: E0216 11:07:26.234846 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
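The certificate_manager.go:356 line above is worth a second look: the rotation deadline (2025-11-15) is already months behind the node clock (2026-02-16), so rotation of the kubelet-serving certificate is overdue even though that certificate itself remains valid until 2026-02-24. Kubelet's certificate manager schedules rotation at a jittered point late in the certificate's validity window; the 70-90% band in the sketch below is an assumption for illustration, not the exact client-go constant:

    // rotation.go - a sketch of how a jittered rotation deadline like the one
    // logged by certificate_manager.go can land well before NotAfter.
    package main

    import (
        "fmt"
        "math/rand"
        "time"
    )

    // rotationDeadline picks a random instant between 70% and 90% of the
    // certificate's lifetime, so rotation is attempted long before expiry.
    // The exact fraction used by client-go is an implementation detail.
    func rotationDeadline(notBefore, notAfter time.Time) time.Time {
        total := notAfter.Sub(notBefore)
        fraction := 0.7 + 0.2*rand.Float64()
        return notBefore.Add(time.Duration(fraction * float64(total)))
    }

    func main() {
        // NotAfter is taken from the log line; NotBefore is assumed to be a
        // year earlier for illustration.
        notBefore := time.Date(2025, time.February, 24, 5, 53, 3, 0, time.UTC)
        notAfter := time.Date(2026, time.February, 24, 5, 53, 3, 0, time.UTC)
        deadline := rotationDeadline(notBefore, notAfter)
        fmt.Println("rotation deadline:", deadline)
        if time.Now().After(deadline) {
            fmt.Println("deadline has passed; rotation should be attempted now")
        }
    }

A past-due deadline is benign by itself: the manager simply requests a new certificate on its next pass, which is what the kubelet is doing here while the node clock sits far ahead of the certificates issued before the suspension.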
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.324743 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.324788 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.324799 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.324816 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.324828 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:26Z","lastTransitionTime":"2026-02-16T11:07:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.430821 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.430896 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.430920 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.430947 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.430975 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:26Z","lastTransitionTime":"2026-02-16T11:07:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.533647 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.533675 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.533683 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.533696 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.533705 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:26Z","lastTransitionTime":"2026-02-16T11:07:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.636411 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.636742 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.636755 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.636775 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.636792 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:26Z","lastTransitionTime":"2026-02-16T11:07:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.739386 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.739418 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.739427 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.739441 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.739449 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:26Z","lastTransitionTime":"2026-02-16T11:07:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.841674 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.841716 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.841730 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.841745 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.841757 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:26Z","lastTransitionTime":"2026-02-16T11:07:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.852687 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cs472" event={"ID":"75391841-4aa8-4f03-b696-23ac283642c4","Type":"ContainerStarted","Data":"d3201fb16e33f256fad3eed7af0783c71ccb51ca75e6734c0745fada820853b8"} Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.852744 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cs472" event={"ID":"75391841-4aa8-4f03-b696-23ac283642c4","Type":"ContainerStarted","Data":"5a92a94b385e42e72f4c48c4c2a8ddba220c077158d0f72e099e562d3b84bf70"} Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.868585 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:26Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.883044 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:26Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.900616 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"39ca5ab7-457c-4404-a3eb-f6acce74843b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d3e706e8ad75d6c7419dc41073b0842faf175f1e153735a12e3ee7243676253\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4978ec698681f01249736b54aa5a1fba37fc94f3667b735c99bdfb97d3bd3380\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-26lss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:26Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.914318 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cs472" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75391841-4aa8-4f03-b696-23ac283642c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a92a94b385e42e72f4c48c4c2a8ddba220c077158d0f72e099e562d3b84bf70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bql55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3201fb16e33f256fad3eed7af0783c71ccb51ca75e6734c0745fada820853b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d77325
7453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bql55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cs472\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:26Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.935691 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ff1c788-4b66-48e9-8178-006f231d264c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87041b3d12c1149f97ab42c0edec7c62aaee2e50309ad8c236b1842a6164bf79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e
40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},
{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4
d193fdbe07598edf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vjrxd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:26Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.944357 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.944433 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.944457 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.944487 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.944509 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:26Z","lastTransitionTime":"2026-02-16T11:07:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.949754 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kvrsd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e1092c7-896f-4334-b157-ac007cdb0d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a5d6a2719dc30352250a07c1028c552b8f73c26d421b869f2e8ddc74fb9dcd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8v7hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:15Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kvrsd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:26Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.965913 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aada2690-8d5f-4854-bc83-59906010e8ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-16T11:07:11Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0216 11:07:05.819935 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0216 11:07:05.820709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2063297113/tls.crt::/tmp/serving-cert-2063297113/tls.key\\\\\\\"\\\\nI0216 11:07:11.242838 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0216 11:07:11.246739 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0216 11:07:11.246759 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0216 11:07:11.246780 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0216 11:07:11.246785 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0216 11:07:11.257100 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0216 11:07:11.257140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0216 11:07:11.257148 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0216 11:07:11.257148 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0216 11:07:11.257158 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0216 11:07:11.257195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0216 11:07:11.257203 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0216 11:07:11.257208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0216 11:07:11.259242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:26Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.983763 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"abcc0a5c-3dfa-48fe-9df1-4f941d9d811c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afad26039cce493d107df9286cf3268dfc5f76d20b86bb34a36ef7742b8419bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68519f3c9bfc45ad28f92e2cf0c28a9413821784aafd91ab65a311259ed6ecf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9f247fc563e96b2d5c1e866afe8eef00f61520018ad001e6b02cffac286d3ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2296e3c50624ffcfe10eb50ae71f715af1868bfa777c60068d3ef55a2544af00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:26Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:26 crc kubenswrapper[4949]: I0216 11:07:26.997576 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1997fe9cefd6bfcd634b77dad76d5d7c814318127f2be93b489d7866808c57ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:26Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.010046 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:27Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.022034 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kxn9z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc3f701c-2094-4818-871c-547fc5636a55\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://993e593b3f31361b71cff339c39e1bc66b6fbbeb8a1d125976254ec852dcd919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rld7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kxn9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:27Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.041802 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jsmls" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e42a398-f83a-4463-9ab7-4e093e80d744\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3e0579c66283751490a28d6f93146aa4b7a000219c17747fe2041a778fe2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7q7gk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jsmls\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:27Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.047224 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.047267 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.047277 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.047293 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.047303 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:27Z","lastTransitionTime":"2026-02-16T11:07:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.055506 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://317e85af8628e3ae9d104c6faacfd4ce6b66adc7d3f0e426433f60118fffa894\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://43a9246b1c43b2bfb063c1d8aded507f620472bb98ffc99e91115e1189478807\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\
\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:27Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.069545 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eff651eda112458195ec8e6bc6ecdf362f44a44f7f8be136f631153251f278c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:27Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.091793 4949 
status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c16c3ec1c264807a71bba82352d13478aabcbae9eee409569b1de207fd89131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6b376f8bceb83918f978008daff9cea37a309102416bb94a209bf286a7fe3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e903800152f2a61249dd315080aec7aff0fb0f96a2d687f4903ee211ac061080\\\",\\\"image\\\":\\\"quay
.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b36100d71aa9c4c492c12006bd25f19aa6f8e8b39048b6e895acbadf3a3d5b5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6614345cd2b9b48d07439200086ab97d851abf34d3fea2b29a5073f224184ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c30142bc1aa2477b0821702607a40da4bd125f14ce8bdd4d6a555481f4be477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32
fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b29470aea2f9de283595ee2dba6e70e842f131de5d8a942668a6a23f3caa9751\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed370792765064d74485415cf3888e2c1a4abd34becb640eac8f30b2de77b19\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"message\\\":\\\"3 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0216 11:07:22.792636 6173 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0216 11:07:22.792642 6173 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0216 11:07:22.792666 6173 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0216 11:07:22.792671 6173 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0216 11:07:22.792697 6173 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0216 11:07:22.792709 6173 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0216 11:07:22.792745 6173 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0216 11:07:22.792752 6173 handler.go:208] Removed *v1.Node event handler 2\\\\nI0216 11:07:22.792764 6173 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0216 11:07:22.792773 6173 handler.go:208] Removed *v1.Node event handler 7\\\\nI0216 11:07:22.792787 6173 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0216 11:07:22.792799 6173 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0216 11:07:22.793125 6173 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0216 11:07:22.793198 6173 factory.go:656] Stopping watch factory\\\\nI0216 11:07:22.793224 6173 handler.go:208] Removed *v1.NetworkPolicy ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:19Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b29470aea2f9de283595ee2dba6e70e842f131de5d8a942668a6a23f3caa9751\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-16T11:07:24Z\\\",\\\"message\\\":\\\"0216 11:07:24.662034 6375 ovnkube.go:137] 
failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:24Z is after 2025-08-24T17:21:41Z]\\\\nI0216 11:07:24.661977 6375 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-ingress-canary/ingress-canary\\\\\\\"}\\\\nI0216 11:07:24.662021 6375 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-cluster-version/cluster-version-operator]} name:Service_openshift-cluster-version/cluster-version-operator_TCP_cluster options:{GoMap:map[event:false h\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47947db986c5433367c6b5257
0a017def2e4babf72f785111587ca86fd745795\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gfr2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:27Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.136320 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-6v4x7"] Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.136756 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:07:27 crc kubenswrapper[4949]: E0216 11:07:27.136813 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.149629 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.149675 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.149686 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.149706 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.149716 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:27Z","lastTransitionTime":"2026-02-16T11:07:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.150291 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://317e85af8628e3ae9d104c6faacfd4ce6b66adc7d3f0e426433f60118fffa894\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://43a9246b1c43b2bfb063c1d8aded507f620472bb98ffc99e91115e1189478807\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name
\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:27Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.162777 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eff651eda112458195ec8e6bc6ecdf362f44a44f7f8be136f631153251f278c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:27Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.182619 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c16c3ec1c264807a71bba82352d13478aabcbae9eee409569b1de207fd89131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6b376f8bceb83918f978008daff9cea37a309102416bb94a209bf286a7fe3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e903800152f2a61249dd315080aec7aff0fb0f96a2d687f4903ee211ac061080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b36100d71aa9c4c492c12006bd25f19aa6f8e8b39048b6e895acbadf3a3d5b5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6614345cd2b9b48d07439200086ab97d851abf34d3fea2b29a5073f224184ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c30142bc1aa2477b0821702607a40da4bd125f14ce8bdd4d6a555481f4be477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b29470aea2f9de283595ee2dba6e70e842f131de5d8a942668a6a23f3caa9751\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed370792765064d74485415cf3888e2c1a4abd34becb640eac8f30b2de77b19\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"message\\\":\\\"3 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0216 11:07:22.792636 6173 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0216 11:07:22.792642 6173 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0216 11:07:22.792666 6173 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0216 11:07:22.792671 6173 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0216 11:07:22.792697 6173 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0216 11:07:22.792709 6173 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0216 11:07:22.792745 6173 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0216 11:07:22.792752 6173 handler.go:208] Removed *v1.Node event handler 2\\\\nI0216 11:07:22.792764 6173 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0216 11:07:22.792773 6173 handler.go:208] Removed *v1.Node event handler 7\\\\nI0216 11:07:22.792787 6173 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0216 11:07:22.792799 6173 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0216 11:07:22.793125 6173 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0216 11:07:22.793198 6173 factory.go:656] Stopping watch factory\\\\nI0216 11:07:22.793224 6173 handler.go:208] Removed *v1.NetworkPolicy ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:19Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b29470aea2f9de283595ee2dba6e70e842f131de5d8a942668a6a23f3caa9751\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-16T11:07:24Z\\\",\\\"message\\\":\\\"0216 11:07:24.662034 6375 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network 
policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:24Z is after 2025-08-24T17:21:41Z]\\\\nI0216 11:07:24.661977 6375 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-ingress-canary/ingress-canary\\\\\\\"}\\\\nI0216 11:07:24.662021 6375 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-cluster-version/cluster-version-operator]} name:Service_openshift-cluster-version/cluster-version-operator_TCP_cluster options:{GoMap:map[event:false h\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47947db986c5433367c6b52570a017def2e4babf72f785111587ca86fd745795\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gfr2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:27Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.195724 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:27Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.208140 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:27Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.211253 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-12 21:35:43.150949103 +0000 UTC Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.221926 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"39ca5ab7-457c-4404-a3eb-f6acce74843b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d3e706e8ad75d6c7419dc41073b0842faf175f1e153735a12e3ee7243676253\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4978ec698681f01249736b54aa5a1fba37fc94f3667b735c99bdfb97d3bd3380\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea17722
5c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-26lss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:27Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.234818 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cs472" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75391841-4aa8-4f03-b696-23ac283642c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a92a94b385e42e72f4c48c4c2a8ddba220c077158d0f72e099e562d3b84bf70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bql55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3201fb16e33f256fad3eed7af0783c71ccb51ca75e6734c0745fada820853b8\\\",\\\
"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bql55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cs472\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:27Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.249087 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6v4x7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"965b4f20-8786-4c47-8721-c348942551d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktpql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktpql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:27Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6v4x7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:27Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.251604 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.251673 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.251692 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.251716 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.251733 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:27Z","lastTransitionTime":"2026-02-16T11:07:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.266880 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ff1c788-4b66-48e9-8178-006f231d264c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87041b3d12c1149f97ab42c0edec7c62aaee2e50309ad8c236b1842a6164bf79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vjrxd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:27Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.280832 4949 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-image-registry/node-ca-kvrsd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e1092c7-896f-4334-b157-ac007cdb0d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a5d6a2719dc30352250a07c1028c552b8f73c26d421b869f2e8ddc74fb9dcd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8v7hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:15Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kvrsd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:27Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.289114 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/965b4f20-8786-4c47-8721-c348942551d6-metrics-certs\") pod \"network-metrics-daemon-6v4x7\" (UID: \"965b4f20-8786-4c47-8721-c348942551d6\") " pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.289396 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ktpql\" (UniqueName: \"kubernetes.io/projected/965b4f20-8786-4c47-8721-c348942551d6-kube-api-access-ktpql\") pod \"network-metrics-daemon-6v4x7\" (UID: \"965b4f20-8786-4c47-8721-c348942551d6\") " pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.303119 4949 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aada2690-8d5f-4854-bc83-59906010e8ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-16T11:07:11Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0216 11:07:05.819935 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0216 11:07:05.820709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2063297113/tls.crt::/tmp/serving-cert-2063297113/tls.key\\\\\\\"\\\\nI0216 11:07:11.242838 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0216 11:07:11.246739 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0216 11:07:11.246759 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0216 11:07:11.246780 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0216 11:07:11.246785 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0216 11:07:11.257100 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0216 11:07:11.257140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0216 11:07:11.257148 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0216 11:07:11.257148 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0216 11:07:11.257158 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0216 11:07:11.257195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0216 11:07:11.257203 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0216 11:07:11.257208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0216 11:07:11.259242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:27Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.317604 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"abcc0a5c-3dfa-48fe-9df1-4f941d9d811c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afad26039cce493d107df9286cf3268dfc5f76d20b86bb34a36ef7742b8419bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68519f3c9bfc45ad28f92e2cf0c28a9413821784aafd91ab65a311259ed6ecf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9f247fc563e96b2d5c1e866afe8eef00f61520018ad001e6b02cffac286d3ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2296e3c50624ffcfe10eb50ae71f715af1868bfa777c60068d3ef55a2544af00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:27Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.335958 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1997fe9cefd6bfcd634b77dad76d5d7c814318127f2be93b489d7866808c57ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:27Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.354260 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:27Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.355122 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.355207 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.355219 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.355238 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.355250 4949 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:27Z","lastTransitionTime":"2026-02-16T11:07:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.370332 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kxn9z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc3f701c-2094-4818-871c-547fc5636a55\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://993e593b3f31361b71cff339c39e1bc66b6fbbeb8a1d125976254ec852dcd919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rld7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kxn9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:27Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.387476 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jsmls" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e42a398-f83a-4463-9ab7-4e093e80d744\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3e0579c66283751490a28d6f93146aa4b7a000219c17747fe2041a778fe2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7q7gk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jsmls\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:27Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.390387 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ktpql\" (UniqueName: \"kubernetes.io/projected/965b4f20-8786-4c47-8721-c348942551d6-kube-api-access-ktpql\") pod \"network-metrics-daemon-6v4x7\" (UID: \"965b4f20-8786-4c47-8721-c348942551d6\") " pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.390502 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/965b4f20-8786-4c47-8721-c348942551d6-metrics-certs\") pod \"network-metrics-daemon-6v4x7\" (UID: \"965b4f20-8786-4c47-8721-c348942551d6\") " pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:07:27 crc kubenswrapper[4949]: E0216 11:07:27.390672 4949 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Feb 16 11:07:27 crc kubenswrapper[4949]: E0216 11:07:27.390765 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/965b4f20-8786-4c47-8721-c348942551d6-metrics-certs podName:965b4f20-8786-4c47-8721-c348942551d6 nodeName:}" failed. No retries permitted until 2026-02-16 11:07:27.890739357 +0000 UTC m=+37.520073522 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/965b4f20-8786-4c47-8721-c348942551d6-metrics-certs") pod "network-metrics-daemon-6v4x7" (UID: "965b4f20-8786-4c47-8721-c348942551d6") : object "openshift-multus"/"metrics-daemon-secret" not registered Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.419412 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ktpql\" (UniqueName: \"kubernetes.io/projected/965b4f20-8786-4c47-8721-c348942551d6-kube-api-access-ktpql\") pod \"network-metrics-daemon-6v4x7\" (UID: \"965b4f20-8786-4c47-8721-c348942551d6\") " pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.458145 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.458515 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.458659 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.458804 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.458922 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:27Z","lastTransitionTime":"2026-02-16T11:07:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.562551 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.562909 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.563105 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.563319 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.563460 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:27Z","lastTransitionTime":"2026-02-16T11:07:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.595671 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.595721 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.595734 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.595753 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.595767 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:27Z","lastTransitionTime":"2026-02-16T11:07:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:27 crc kubenswrapper[4949]: E0216 11:07:27.608028 4949 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc77a723-71f9-4f4a-b80e-2feb50c63f04\\\",\\\"systemUUID\\\":\\\"fcf7eef6-e236-4c8e-bd9c-41b70a7621ed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:27Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.612079 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.612119 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.612129 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.612145 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.612158 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:27Z","lastTransitionTime":"2026-02-16T11:07:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:27 crc kubenswrapper[4949]: E0216 11:07:27.623936 4949 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc77a723-71f9-4f4a-b80e-2feb50c63f04\\\",\\\"systemUUID\\\":\\\"fcf7eef6-e236-4c8e-bd9c-41b70a7621ed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:27Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.627894 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.627945 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.627959 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.627978 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.627990 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:27Z","lastTransitionTime":"2026-02-16T11:07:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:27 crc kubenswrapper[4949]: E0216 11:07:27.638821 4949 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc77a723-71f9-4f4a-b80e-2feb50c63f04\\\",\\\"systemUUID\\\":\\\"fcf7eef6-e236-4c8e-bd9c-41b70a7621ed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:27Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.642126 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.642173 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.642187 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.642219 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.642244 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:27Z","lastTransitionTime":"2026-02-16T11:07:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:27 crc kubenswrapper[4949]: E0216 11:07:27.653579 4949 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc77a723-71f9-4f4a-b80e-2feb50c63f04\\\",\\\"systemUUID\\\":\\\"fcf7eef6-e236-4c8e-bd9c-41b70a7621ed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:27Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.657573 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.657594 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.657603 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.657617 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.657627 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:27Z","lastTransitionTime":"2026-02-16T11:07:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:27 crc kubenswrapper[4949]: E0216 11:07:27.668979 4949 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc77a723-71f9-4f4a-b80e-2feb50c63f04\\\",\\\"systemUUID\\\":\\\"fcf7eef6-e236-4c8e-bd9c-41b70a7621ed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:27Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:27 crc kubenswrapper[4949]: E0216 11:07:27.669142 4949 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.670540 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.670562 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.670570 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.670583 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.670593 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:27Z","lastTransitionTime":"2026-02-16T11:07:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.774115 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.774160 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.774178 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.774240 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.774264 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:27Z","lastTransitionTime":"2026-02-16T11:07:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.877413 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.877493 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.877507 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.877531 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.877543 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:27Z","lastTransitionTime":"2026-02-16T11:07:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.896568 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/965b4f20-8786-4c47-8721-c348942551d6-metrics-certs\") pod \"network-metrics-daemon-6v4x7\" (UID: \"965b4f20-8786-4c47-8721-c348942551d6\") " pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:07:27 crc kubenswrapper[4949]: E0216 11:07:27.896780 4949 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Feb 16 11:07:27 crc kubenswrapper[4949]: E0216 11:07:27.896911 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/965b4f20-8786-4c47-8721-c348942551d6-metrics-certs podName:965b4f20-8786-4c47-8721-c348942551d6 nodeName:}" failed. No retries permitted until 2026-02-16 11:07:28.89686865 +0000 UTC m=+38.526202855 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/965b4f20-8786-4c47-8721-c348942551d6-metrics-certs") pod "network-metrics-daemon-6v4x7" (UID: "965b4f20-8786-4c47-8721-c348942551d6") : object "openshift-multus"/"metrics-daemon-secret" not registered Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.981354 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.981424 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.981437 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.981460 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.981475 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:27Z","lastTransitionTime":"2026-02-16T11:07:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.997972 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.998315 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:07:27 crc kubenswrapper[4949]: E0216 11:07:27.998367 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:07:43.998310247 +0000 UTC m=+53.627644412 (durationBeforeRetry 16s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:07:27 crc kubenswrapper[4949]: E0216 11:07:27.998482 4949 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.998543 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:07:27 crc kubenswrapper[4949]: E0216 11:07:27.998570 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-16 11:07:43.998537303 +0000 UTC m=+53.627871588 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.998608 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:07:27 crc kubenswrapper[4949]: I0216 11:07:27.998690 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:07:27 crc kubenswrapper[4949]: E0216 11:07:27.998714 4949 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Feb 16 11:07:27 crc kubenswrapper[4949]: E0216 11:07:27.998737 4949 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 16 11:07:27 crc kubenswrapper[4949]: E0216 11:07:27.998765 4949 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 16 11:07:27 crc kubenswrapper[4949]: E0216 11:07:27.998782 4949 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 16 11:07:27 crc kubenswrapper[4949]: E0216 11:07:27.998819 4949 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 16 11:07:27 crc kubenswrapper[4949]: E0216 11:07:27.998767 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-16 11:07:43.998751518 +0000 UTC m=+53.628085683 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Feb 16 11:07:27 crc kubenswrapper[4949]: E0216 11:07:27.998840 4949 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 16 11:07:27 crc kubenswrapper[4949]: E0216 11:07:27.998853 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-02-16 11:07:43.99884546 +0000 UTC m=+53.628179625 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 16 11:07:27 crc kubenswrapper[4949]: E0216 11:07:27.998861 4949 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 16 11:07:27 crc kubenswrapper[4949]: E0216 11:07:27.998908 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-02-16 11:07:43.998894131 +0000 UTC m=+53.628228336 (durationBeforeRetry 16s). 
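[Annotation on the projected-volume errors in this stretch: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered is likewise kubelet-side. "Not registered" means the object is not yet in the kubelet's local object cache, not that it is absent from the API server; projected kube-api-access volumes cannot be assembled until those ConfigMaps sync, so each mount is requeued with the same 16s backoff. A hedged sketch, under the same Python-client assumptions as above, for confirming the objects do exist server-side:]

```python
from kubernetes import client, config  # third-party: pip install kubernetes

config.load_kube_config()
v1 = client.CoreV1Api()

ns = "openshift-network-diagnostics"
for name in ("kube-root-ca.crt", "openshift-service-ca.crt"):
    # Raises kubernetes.client.exceptions.ApiException (404) if truly absent.
    cm = v1.read_namespaced_config_map(name, ns)
    print(f"{ns}/{name}: present, keys={list(cm.data or {})}")
```

[If both print as present, the backlog clears by itself once the kubelet's informers reconnect; these errors are a symptom of the degraded node, not its cause.]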
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 16 11:07:28 crc kubenswrapper[4949]: I0216 11:07:28.083411 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:28 crc kubenswrapper[4949]: I0216 11:07:28.083666 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:28 crc kubenswrapper[4949]: I0216 11:07:28.083749 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:28 crc kubenswrapper[4949]: I0216 11:07:28.083819 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:28 crc kubenswrapper[4949]: I0216 11:07:28.083875 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:28Z","lastTransitionTime":"2026-02-16T11:07:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:28 crc kubenswrapper[4949]: I0216 11:07:28.187659 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:28 crc kubenswrapper[4949]: I0216 11:07:28.187712 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:28 crc kubenswrapper[4949]: I0216 11:07:28.187729 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:28 crc kubenswrapper[4949]: I0216 11:07:28.187755 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:28 crc kubenswrapper[4949]: I0216 11:07:28.187773 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:28Z","lastTransitionTime":"2026-02-16T11:07:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:28 crc kubenswrapper[4949]: I0216 11:07:28.212398 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-02 04:05:04.438607348 +0000 UTC Feb 16 11:07:28 crc kubenswrapper[4949]: I0216 11:07:28.234438 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:07:28 crc kubenswrapper[4949]: I0216 11:07:28.234512 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:07:28 crc kubenswrapper[4949]: E0216 11:07:28.234617 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:07:28 crc kubenswrapper[4949]: I0216 11:07:28.234451 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:07:28 crc kubenswrapper[4949]: E0216 11:07:28.234794 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:07:28 crc kubenswrapper[4949]: E0216 11:07:28.234875 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:07:28 crc kubenswrapper[4949]: I0216 11:07:28.291986 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:28 crc kubenswrapper[4949]: I0216 11:07:28.292048 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:28 crc kubenswrapper[4949]: I0216 11:07:28.292070 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:28 crc kubenswrapper[4949]: I0216 11:07:28.292103 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:28 crc kubenswrapper[4949]: I0216 11:07:28.292124 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:28Z","lastTransitionTime":"2026-02-16T11:07:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:28 crc kubenswrapper[4949]: I0216 11:07:28.394228 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:28 crc kubenswrapper[4949]: I0216 11:07:28.394270 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:28 crc kubenswrapper[4949]: I0216 11:07:28.394282 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:28 crc kubenswrapper[4949]: I0216 11:07:28.394299 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:28 crc kubenswrapper[4949]: I0216 11:07:28.394312 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:28Z","lastTransitionTime":"2026-02-16T11:07:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:28 crc kubenswrapper[4949]: I0216 11:07:28.496995 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:28 crc kubenswrapper[4949]: I0216 11:07:28.497062 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:28 crc kubenswrapper[4949]: I0216 11:07:28.497074 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:28 crc kubenswrapper[4949]: I0216 11:07:28.497101 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:28 crc kubenswrapper[4949]: I0216 11:07:28.497121 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:28Z","lastTransitionTime":"2026-02-16T11:07:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:28 crc kubenswrapper[4949]: I0216 11:07:28.600291 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:28 crc kubenswrapper[4949]: I0216 11:07:28.600337 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:28 crc kubenswrapper[4949]: I0216 11:07:28.600349 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:28 crc kubenswrapper[4949]: I0216 11:07:28.600365 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:28 crc kubenswrapper[4949]: I0216 11:07:28.600377 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:28Z","lastTransitionTime":"2026-02-16T11:07:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:28 crc kubenswrapper[4949]: I0216 11:07:28.702796 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:28 crc kubenswrapper[4949]: I0216 11:07:28.702842 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:28 crc kubenswrapper[4949]: I0216 11:07:28.702859 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:28 crc kubenswrapper[4949]: I0216 11:07:28.702880 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:28 crc kubenswrapper[4949]: I0216 11:07:28.702892 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:28Z","lastTransitionTime":"2026-02-16T11:07:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:28 crc kubenswrapper[4949]: I0216 11:07:28.805886 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:28 crc kubenswrapper[4949]: I0216 11:07:28.805928 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:28 crc kubenswrapper[4949]: I0216 11:07:28.805940 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:28 crc kubenswrapper[4949]: I0216 11:07:28.805957 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:28 crc kubenswrapper[4949]: I0216 11:07:28.805972 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:28Z","lastTransitionTime":"2026-02-16T11:07:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:28 crc kubenswrapper[4949]: I0216 11:07:28.906606 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/965b4f20-8786-4c47-8721-c348942551d6-metrics-certs\") pod \"network-metrics-daemon-6v4x7\" (UID: \"965b4f20-8786-4c47-8721-c348942551d6\") " pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:07:28 crc kubenswrapper[4949]: E0216 11:07:28.906734 4949 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Feb 16 11:07:28 crc kubenswrapper[4949]: E0216 11:07:28.906803 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/965b4f20-8786-4c47-8721-c348942551d6-metrics-certs podName:965b4f20-8786-4c47-8721-c348942551d6 nodeName:}" failed. No retries permitted until 2026-02-16 11:07:30.906774881 +0000 UTC m=+40.536109056 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/965b4f20-8786-4c47-8721-c348942551d6-metrics-certs") pod "network-metrics-daemon-6v4x7" (UID: "965b4f20-8786-4c47-8721-c348942551d6") : object "openshift-multus"/"metrics-daemon-secret" not registered Feb 16 11:07:28 crc kubenswrapper[4949]: I0216 11:07:28.908769 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:28 crc kubenswrapper[4949]: I0216 11:07:28.908794 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:28 crc kubenswrapper[4949]: I0216 11:07:28.908802 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:28 crc kubenswrapper[4949]: I0216 11:07:28.908834 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:28 crc kubenswrapper[4949]: I0216 11:07:28.908844 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:28Z","lastTransitionTime":"2026-02-16T11:07:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.011836 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.011880 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.011891 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.011909 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.011922 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:29Z","lastTransitionTime":"2026-02-16T11:07:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.114021 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.114064 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.114076 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.114095 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.114148 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:29Z","lastTransitionTime":"2026-02-16T11:07:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.213594 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-30 20:33:49.77809891 +0000 UTC Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.216552 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.216598 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.216609 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.216633 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.216645 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:29Z","lastTransitionTime":"2026-02-16T11:07:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.234931 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:07:29 crc kubenswrapper[4949]: E0216 11:07:29.235100 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6" Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.319147 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.319201 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.319212 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.319225 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.319234 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:29Z","lastTransitionTime":"2026-02-16T11:07:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.421895 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.421947 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.421960 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.421977 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.421989 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:29Z","lastTransitionTime":"2026-02-16T11:07:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.523991 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.524020 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.524028 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.524042 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.524051 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:29Z","lastTransitionTime":"2026-02-16T11:07:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.625531 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.625576 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.625588 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.625604 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.625637 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:29Z","lastTransitionTime":"2026-02-16T11:07:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.727722 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.727768 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.727782 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.727801 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.727819 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:29Z","lastTransitionTime":"2026-02-16T11:07:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.830815 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.830895 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.830917 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.830951 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.830978 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:29Z","lastTransitionTime":"2026-02-16T11:07:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.933993 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.934041 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.934053 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.934070 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:29 crc kubenswrapper[4949]: I0216 11:07:29.934083 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:29Z","lastTransitionTime":"2026-02-16T11:07:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.036960 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.037015 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.037032 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.037056 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.037074 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:30Z","lastTransitionTime":"2026-02-16T11:07:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.139011 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.139048 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.139056 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.139068 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.139077 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:30Z","lastTransitionTime":"2026-02-16T11:07:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.214335 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-17 11:53:40.49298597 +0000 UTC Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.234251 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.234275 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:07:30 crc kubenswrapper[4949]: E0216 11:07:30.234469 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:07:30 crc kubenswrapper[4949]: E0216 11:07:30.234523 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.234281 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:07:30 crc kubenswrapper[4949]: E0216 11:07:30.234586 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.241337 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.241395 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.241418 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.241443 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.241468 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:30Z","lastTransitionTime":"2026-02-16T11:07:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.343553 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.343615 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.343631 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.343653 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.343670 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:30Z","lastTransitionTime":"2026-02-16T11:07:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.445839 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.445903 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.445916 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.445938 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.445954 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:30Z","lastTransitionTime":"2026-02-16T11:07:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.547728 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.547779 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.547797 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.547818 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.547833 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:30Z","lastTransitionTime":"2026-02-16T11:07:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.650793 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.650833 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.650847 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.650863 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.650875 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:30Z","lastTransitionTime":"2026-02-16T11:07:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.752997 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.753044 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.753058 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.753083 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.753108 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:30Z","lastTransitionTime":"2026-02-16T11:07:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.855977 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.856015 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.856024 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.856041 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.856053 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:30Z","lastTransitionTime":"2026-02-16T11:07:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.929521 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/965b4f20-8786-4c47-8721-c348942551d6-metrics-certs\") pod \"network-metrics-daemon-6v4x7\" (UID: \"965b4f20-8786-4c47-8721-c348942551d6\") " pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:07:30 crc kubenswrapper[4949]: E0216 11:07:30.929694 4949 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Feb 16 11:07:30 crc kubenswrapper[4949]: E0216 11:07:30.929756 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/965b4f20-8786-4c47-8721-c348942551d6-metrics-certs podName:965b4f20-8786-4c47-8721-c348942551d6 nodeName:}" failed. No retries permitted until 2026-02-16 11:07:34.929737434 +0000 UTC m=+44.559071609 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/965b4f20-8786-4c47-8721-c348942551d6-metrics-certs") pod "network-metrics-daemon-6v4x7" (UID: "965b4f20-8786-4c47-8721-c348942551d6") : object "openshift-multus"/"metrics-daemon-secret" not registered Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.959114 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.959151 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.959174 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.959230 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:30 crc kubenswrapper[4949]: I0216 11:07:30.959249 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:30Z","lastTransitionTime":"2026-02-16T11:07:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.060977 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.061010 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.061019 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.061031 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.061043 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:31Z","lastTransitionTime":"2026-02-16T11:07:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.324887 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-17 12:59:42.032530421 +0000 UTC Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.325254 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:07:31 crc kubenswrapper[4949]: E0216 11:07:31.325359 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6" Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.328542 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.328599 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.328623 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.328653 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.328677 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:31Z","lastTransitionTime":"2026-02-16T11:07:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.368127 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:31Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.385704 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:31Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.406647 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"39ca5ab7-457c-4404-a3eb-f6acce74843b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d3e706e8ad75d6c7419dc41073b0842faf175f1e153735a12e3ee7243676253\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4978ec698681f01249736b54aa5a1fba37fc94f3667b735c99bdfb97d3bd3380\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-26lss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:31Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.424755 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cs472" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75391841-4aa8-4f03-b696-23ac283642c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a92a94b385e42e72f4c48c4c2a8ddba220c077158d0f72e099e562d3b84bf70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bql55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3201fb16e33f256fad3eed7af0783c71ccb51ca75e6734c0745fada820853b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d77325
7453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bql55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cs472\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:31Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.431818 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.431857 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.431866 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.431879 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.431891 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:31Z","lastTransitionTime":"2026-02-16T11:07:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.451256 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6v4x7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"965b4f20-8786-4c47-8721-c348942551d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktpql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktpql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:27Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6v4x7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:31Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.469720 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ff1c788-4b66-48e9-8178-006f231d264c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87041b3d12c1149f97ab42c0edec7c62aaee2e50309ad8c236b1842a6164bf79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vjrxd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:31Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.482064 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kvrsd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e1092c7-896f-4334-b157-ac007cdb0d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a5d6a2719dc30352250a07c1028c552b8f73c26d421b869f2e8ddc74fb9dcd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8v7hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:15Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kvrsd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:31Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.496060 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jsmls" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e42a398-f83a-4463-9ab7-4e093e80d744\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3e0579c66283751490a28d6f93146aa4b7a000219c17747fe2041a778fe2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7q7gk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jsmls\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:31Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.513473 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aada2690-8d5f-4854-bc83-59906010e8ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-16T11:07:11Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0216 11:07:05.819935 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0216 11:07:05.820709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2063297113/tls.crt::/tmp/serving-cert-2063297113/tls.key\\\\\\\"\\\\nI0216 11:07:11.242838 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0216 11:07:11.246739 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0216 11:07:11.246759 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0216 11:07:11.246780 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0216 11:07:11.246785 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0216 11:07:11.257100 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0216 11:07:11.257140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0216 11:07:11.257148 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0216 11:07:11.257148 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0216 11:07:11.257158 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0216 11:07:11.257195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0216 11:07:11.257203 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0216 11:07:11.257208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0216 11:07:11.259242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:31Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.525949 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"abcc0a5c-3dfa-48fe-9df1-4f941d9d811c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afad26039cce493d107df9286cf3268dfc5f76d20b86bb34a36ef7742b8419bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68519f3c9bfc45ad28f92e2cf0c28a9413821784aafd91ab65a311259ed6ecf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9f247fc563e96b2d5c1e866afe8eef00f61520018ad001e6b02cffac286d3ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2296e3c50624ffcfe10eb50ae71f715af1868bfa777c60068d3ef55a2544af00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:31Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.534456 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.534520 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.534543 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.534572 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.534593 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:31Z","lastTransitionTime":"2026-02-16T11:07:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.541486 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1997fe9cefd6bfcd634b77dad76d5d7c814318127f2be93b489d7866808c57ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:31Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.555607 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:31Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.568331 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kxn9z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc3f701c-2094-4818-871c-547fc5636a55\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://993e593b3f31361b71cff339c39e1bc66b6fbbeb8a1d125976254ec852dcd919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rld7r\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kxn9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:31Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.583891 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://317e85af8628e3ae9d104c6faacfd4ce6b66adc7d3f0e426433f60118fffa894\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://43a9246b1c43b2bfb063c1d8aded507f620472bb98ffc99e91115e1189478807\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disable
d\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:31Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.596748 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eff651eda112458195ec8e6bc6ecdf362f44a44f7f8be136f631153251f278c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:31Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.615178 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c16c3ec1c264807a71bba82352d13478aabcbae9eee409569b1de207fd89131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6b376f8bceb83918f978008daff9cea37a309102416bb94a209bf286a7fe3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e903800152f2a61249dd315080aec7aff0fb0f96a2d687f4903ee211ac061080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b36100d71aa9c4c492c12006bd25f19aa6f8e8b39048b6e895acbadf3a3d5b5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6614345cd2b9b48d07439200086ab97d851abf34d3fea2b29a5073f224184ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c30142bc1aa2477b0821702607a40da4bd125f14ce8bdd4d6a555481f4be477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b29470aea2f9de283595ee2dba6e70e842f131de5d8a942668a6a23f3caa9751\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed370792765064d74485415cf3888e2c1a4abd34becb640eac8f30b2de77b19\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"message\\\":\\\"3 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0216 11:07:22.792636 6173 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0216 11:07:22.792642 6173 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0216 11:07:22.792666 6173 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0216 11:07:22.792671 6173 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0216 11:07:22.792697 6173 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0216 11:07:22.792709 6173 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0216 11:07:22.792745 6173 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0216 11:07:22.792752 6173 handler.go:208] Removed *v1.Node event handler 2\\\\nI0216 11:07:22.792764 6173 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0216 11:07:22.792773 6173 handler.go:208] Removed *v1.Node event handler 7\\\\nI0216 11:07:22.792787 6173 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0216 11:07:22.792799 6173 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0216 11:07:22.793125 6173 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0216 11:07:22.793198 6173 factory.go:656] Stopping watch factory\\\\nI0216 11:07:22.793224 6173 handler.go:208] Removed *v1.NetworkPolicy ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:19Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b29470aea2f9de283595ee2dba6e70e842f131de5d8a942668a6a23f3caa9751\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-16T11:07:24Z\\\",\\\"message\\\":\\\"0216 11:07:24.662034 6375 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network 
policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:24Z is after 2025-08-24T17:21:41Z]\\\\nI0216 11:07:24.661977 6375 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-ingress-canary/ingress-canary\\\\\\\"}\\\\nI0216 11:07:24.662021 6375 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-cluster-version/cluster-version-operator]} name:Service_openshift-cluster-version/cluster-version-operator_TCP_cluster options:{GoMap:map[event:false h\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47947db986c5433367c6b52570a017def2e4babf72f785111587ca86fd745795\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gfr2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:31Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.636541 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.636598 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.636616 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.636639 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.636656 4949 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:31Z","lastTransitionTime":"2026-02-16T11:07:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.740054 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.740093 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.740106 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.740123 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.740137 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:31Z","lastTransitionTime":"2026-02-16T11:07:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.842643 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.842916 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.842993 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.843070 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.843133 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:31Z","lastTransitionTime":"2026-02-16T11:07:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.945482 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.945834 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.946073 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.946322 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:31 crc kubenswrapper[4949]: I0216 11:07:31.946528 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:31Z","lastTransitionTime":"2026-02-16T11:07:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.050319 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.050457 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.050531 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.050616 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.050702 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:32Z","lastTransitionTime":"2026-02-16T11:07:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.154209 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.154454 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.154519 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.154598 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.154669 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:32Z","lastTransitionTime":"2026-02-16T11:07:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.235130 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:07:32 crc kubenswrapper[4949]: E0216 11:07:32.235325 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.235359 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:07:32 crc kubenswrapper[4949]: E0216 11:07:32.235479 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.235519 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:07:32 crc kubenswrapper[4949]: E0216 11:07:32.235823 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.236040 4949 scope.go:117] "RemoveContainer" containerID="7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546" Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.257219 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.257675 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.257924 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.258138 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.258459 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:32Z","lastTransitionTime":"2026-02-16T11:07:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.325355 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-02 21:51:58.060533861 +0000 UTC Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.361218 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.361508 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.361516 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.361530 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.361540 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:32Z","lastTransitionTime":"2026-02-16T11:07:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.464596 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.464635 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.464645 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.464662 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.464678 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:32Z","lastTransitionTime":"2026-02-16T11:07:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.566982 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.567026 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.567036 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.567054 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.567064 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:32Z","lastTransitionTime":"2026-02-16T11:07:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.714930 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.714968 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.714981 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.714996 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.715007 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:32Z","lastTransitionTime":"2026-02-16T11:07:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.817402 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.817444 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.817455 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.817471 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.817482 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:32Z","lastTransitionTime":"2026-02-16T11:07:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.881708 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.883514 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"cb4e6265ce5aefd48ee356fb002a61e38aed11e42968db8945ffeaba1d7ee3c9"} Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.883880 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.900586 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://317e85af8628e3ae9d104c6faacfd4ce6b66adc7d3f0e426433f60118fffa894\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://43a9246b1c43b2bfb063c1d8aded507f620472bb98ffc99e91115e1189478807\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:32Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.917768 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eff651eda112458195ec8e6bc6ecdf362f44a44f7f8be136f631153251f278c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:32Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.919414 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.919452 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.919468 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.919492 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.919521 
4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:32Z","lastTransitionTime":"2026-02-16T11:07:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.942100 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c16c3ec1c264807a71bba82352d13478aabcbae9eee409569b1de207fd89131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6b376f8bceb83918f978008daff9cea37a309102416bb94a209bf286a7fe3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\
\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e903800152f2a61249dd315080aec7aff0fb0f96a2d687f4903ee211ac061080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b36100d71aa9c4c492c12006bd25f19aa6f8e8b39048b6e895acbadf3a3d5b5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6614345cd2b9b48d07439200086ab97d851abf34d3fea2b29a5073f224184ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernete
s.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c30142bc1aa2477b0821702607a40da4bd125f14ce8bdd4d6a555481f4be477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b29470aea2f9de283595ee2dba6e70e842f131de5d8a942668a6a23f3caa9751\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ed370792765064d74485415cf3888e2c1a4abd34becb640eac8f30b2de77b19\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"message\\\":\\\"3 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0216 11:07:22.792636 6173 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0216 11:07:22.792642 6173 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0216 11:07:22.792666 6173 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0216 11:07:22.792671 6173 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0216 11:07:22.792697 6173 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0216 11:07:22.792709 6173 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0216 11:07:22.792745 6173 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0216 11:07:22.792752 6173 handler.go:208] Removed *v1.Node event handler 2\\\\nI0216 11:07:22.792764 6173 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0216 11:07:22.792773 6173 handler.go:208] Removed *v1.Node event handler 7\\\\nI0216 11:07:22.792787 6173 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0216 11:07:22.792799 6173 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0216 11:07:22.793125 6173 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0216 11:07:22.793198 6173 factory.go:656] Stopping watch factory\\\\nI0216 
11:07:22.793224 6173 handler.go:208] Removed *v1.NetworkPolicy ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:19Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b29470aea2f9de283595ee2dba6e70e842f131de5d8a942668a6a23f3caa9751\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-16T11:07:24Z\\\",\\\"message\\\":\\\"0216 11:07:24.662034 6375 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:24Z is after 2025-08-24T17:21:41Z]\\\\nI0216 11:07:24.661977 6375 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-ingress-canary/ingress-canary\\\\\\\"}\\\\nI0216 11:07:24.662021 6375 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-cluster-version/cluster-version-operator]} name:Service_openshift-cluster-version/cluster-version-operator_TCP_cluster options:{GoMap:map[event:false 
h\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47947db986c5433367c6b52570a017def2e4babf72f785111587ca86fd745795\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d209
9482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gfr2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:32Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.953722 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"39ca5ab7-457c-4404-a3eb-f6acce74843b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d3e706e8ad75d6c7419dc41073b0842faf175f1e153735a12e3ee7243676253\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":
\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4978ec698681f01249736b54aa5a1fba37fc94f3667b735c99bdfb97d3bd3380\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-26lss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:32Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.966437 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cs472" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75391841-4aa8-4f03-b696-23ac283642c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a92a94b385e42e72f4c48c4c2a8ddba220c077158d0f72e099e562d3b84bf70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kub
ernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bql55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3201fb16e33f256fad3eed7af0783c71ccb51ca75e6734c0745fada820853b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bql55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cs472\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:32Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.979712 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6v4x7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"965b4f20-8786-4c47-8721-c348942551d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktpql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktpql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:27Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6v4x7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:32Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:32 crc kubenswrapper[4949]: I0216 11:07:32.995339 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:32Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.012752 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:33Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.022309 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.022365 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.022383 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.022426 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.022443 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:33Z","lastTransitionTime":"2026-02-16T11:07:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.031587 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ff1c788-4b66-48e9-8178-006f231d264c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87041b3d12c1149f97ab42c0edec7c62aaee2e50309ad8c236b1842a6164bf79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vjrxd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:33Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.049627 4949 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-image-registry/node-ca-kvrsd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e1092c7-896f-4334-b157-ac007cdb0d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a5d6a2719dc30352250a07c1028c552b8f73c26d421b869f2e8ddc74fb9dcd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8v7hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:15Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kvrsd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:33Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.068001 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"abcc0a5c-3dfa-48fe-9df1-4f941d9d811c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afad26039cce493d107df9286cf3268dfc5f76d20b86bb34a36ef7742b8419bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68519f3c9bfc45ad28f92e2cf0c28a9413821784aafd91ab65a311259ed6ecf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9f247fc563e96b2d5c1e866afe8eef00f61520018ad001e6b02cffac286d3ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2296e3c50624ffcfe10eb50ae71f715af1868bfa777c60068d3ef55a2544af00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:33Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.088161 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1997fe9cefd6bfcd634b77dad76d5d7c814318127f2be93b489d7866808c57ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:33Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.104512 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:33Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.116829 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kxn9z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc3f701c-2094-4818-871c-547fc5636a55\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://993e593b3f31361b71cff339c39e1bc66b6fbbeb8a1d125976254ec852dcd919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rld7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kxn9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:33Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.125434 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.125491 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.125506 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.125526 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.125538 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:33Z","lastTransitionTime":"2026-02-16T11:07:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.138038 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jsmls" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e42a398-f83a-4463-9ab7-4e093e80d744\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3e0579c66283751490a28d6f93146aa4b7a000219c17747fe2041a778fe2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7q7gk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jsmls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:33Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.164015 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aada2690-8d5f-4854-bc83-59906010e8ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4e6265ce5aefd48ee356fb002a61e38aed11e42968db8945ffeaba1d7ee3c9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-16T11:07:11Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0216 11:07:05.819935 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0216 11:07:05.820709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2063297113/tls.crt::/tmp/serving-cert-2063297113/tls.key\\\\\\\"\\\\nI0216 11:07:11.242838 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0216 11:07:11.246739 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0216 11:07:11.246759 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0216 11:07:11.246780 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0216 11:07:11.246785 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0216 11:07:11.257100 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0216 11:07:11.257140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0216 11:07:11.257148 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0216 11:07:11.257148 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0216 11:07:11.257158 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0216 11:07:11.257195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0216 11:07:11.257203 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0216 11:07:11.257208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0216 11:07:11.259242 1 cmd.go:182] pods 
\\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:33Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.228869 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.228918 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.228934 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.228953 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.228967 4949 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:33Z","lastTransitionTime":"2026-02-16T11:07:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.235296 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:07:33 crc kubenswrapper[4949]: E0216 11:07:33.235486 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.325597 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-25 02:35:41.957345542 +0000 UTC Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.331784 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.331823 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.331845 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.331871 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.331887 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:33Z","lastTransitionTime":"2026-02-16T11:07:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.440545 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.440631 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.440655 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.440685 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.440706 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:33Z","lastTransitionTime":"2026-02-16T11:07:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.542572 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.542604 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.542613 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.542625 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.542633 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:33Z","lastTransitionTime":"2026-02-16T11:07:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.645332 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.645425 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.645441 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.645459 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.645473 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:33Z","lastTransitionTime":"2026-02-16T11:07:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.748579 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.748644 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.748669 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.748700 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.748725 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:33Z","lastTransitionTime":"2026-02-16T11:07:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.851459 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.851501 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.851513 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.851530 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.851542 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:33Z","lastTransitionTime":"2026-02-16T11:07:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.954414 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.954452 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.954462 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.954477 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:33 crc kubenswrapper[4949]: I0216 11:07:33.954488 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:33Z","lastTransitionTime":"2026-02-16T11:07:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.057382 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.057438 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.057452 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.057470 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.057490 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:34Z","lastTransitionTime":"2026-02-16T11:07:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.161086 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.161137 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.161150 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.161174 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.161205 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:34Z","lastTransitionTime":"2026-02-16T11:07:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.235046 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.235085 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.235121 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:07:34 crc kubenswrapper[4949]: E0216 11:07:34.235302 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:07:34 crc kubenswrapper[4949]: E0216 11:07:34.235518 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:07:34 crc kubenswrapper[4949]: E0216 11:07:34.235757 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.264204 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.264280 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.264315 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.264345 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.264367 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:34Z","lastTransitionTime":"2026-02-16T11:07:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.326103 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-14 04:19:41.399227699 +0000 UTC Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.367592 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.367641 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.367655 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.367673 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.367686 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:34Z","lastTransitionTime":"2026-02-16T11:07:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.470613 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.470656 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.470669 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.470686 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.470698 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:34Z","lastTransitionTime":"2026-02-16T11:07:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.573530 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.573591 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.573622 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.573663 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.573687 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:34Z","lastTransitionTime":"2026-02-16T11:07:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.676136 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.676206 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.676217 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.676233 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.676247 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:34Z","lastTransitionTime":"2026-02-16T11:07:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.778862 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.778893 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.778900 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.778914 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.778923 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:34Z","lastTransitionTime":"2026-02-16T11:07:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.881738 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.881791 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.881802 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.881821 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.881833 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:34Z","lastTransitionTime":"2026-02-16T11:07:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.963124 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/965b4f20-8786-4c47-8721-c348942551d6-metrics-certs\") pod \"network-metrics-daemon-6v4x7\" (UID: \"965b4f20-8786-4c47-8721-c348942551d6\") " pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:07:34 crc kubenswrapper[4949]: E0216 11:07:34.963408 4949 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Feb 16 11:07:34 crc kubenswrapper[4949]: E0216 11:07:34.963550 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/965b4f20-8786-4c47-8721-c348942551d6-metrics-certs podName:965b4f20-8786-4c47-8721-c348942551d6 nodeName:}" failed. No retries permitted until 2026-02-16 11:07:42.96352382 +0000 UTC m=+52.592857985 (durationBeforeRetry 8s). 
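Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/965b4f20-8786-4c47-8721-c348942551d6-metrics-certs") pod "network-metrics-daemon-6v4x7" (UID: "965b4f20-8786-4c47-8721-c348942551d6") : object "openshift-multus"/"metrics-daemon-secret" not registered

The "not registered" wording in the entry above is the kubelet's own object cache talking, not the API server: the kubelet resolves secret volumes through a per-pod object cache, and an object is only registered there once a pod that references it is being synced, so the message does not by itself prove the secret is absent. A minimal client-go sketch to check the secret directly against the API server; the kubeconfig path is a placeholder assumption, and this is an illustrative probe, not anything the kubelet itself runs:

package main

import (
	"context"
	"fmt"
	"log"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Placeholder path; point this at a kubeconfig with read access
	// to the openshift-multus namespace.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		log.Fatalf("load kubeconfig: %v", err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		log.Fatalf("build clientset: %v", err)
	}
	// The log names the secret exactly: openshift-multus/metrics-daemon-secret.
	// If this Get succeeds, the object exists and the kubelet error was only
	// its local cache lagging behind pod sync.
	s, err := cs.CoreV1().Secrets("openshift-multus").Get(context.TODO(), "metrics-daemon-secret", metav1.GetOptions{})
	if err != nil {
		log.Fatalf("secret lookup failed: %v", err)
	}
	fmt.Printf("found %s/%s with %d data keys\n", s.Namespace, s.Name, len(s.Data))
}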
Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.985080 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.985116 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.985125 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.985143 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:34 crc kubenswrapper[4949]: I0216 11:07:34.985157 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:34Z","lastTransitionTime":"2026-02-16T11:07:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:35 crc kubenswrapper[4949]: I0216 11:07:35.087805 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:35 crc kubenswrapper[4949]: I0216 11:07:35.087855 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:35 crc kubenswrapper[4949]: I0216 11:07:35.087867 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:35 crc kubenswrapper[4949]: I0216 11:07:35.087885 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:35 crc kubenswrapper[4949]: I0216 11:07:35.087896 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:35Z","lastTransitionTime":"2026-02-16T11:07:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:35 crc kubenswrapper[4949]: I0216 11:07:35.190464 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:35 crc kubenswrapper[4949]: I0216 11:07:35.190502 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:35 crc kubenswrapper[4949]: I0216 11:07:35.190511 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:35 crc kubenswrapper[4949]: I0216 11:07:35.190525 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:35 crc kubenswrapper[4949]: I0216 11:07:35.190535 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:35Z","lastTransitionTime":"2026-02-16T11:07:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:35 crc kubenswrapper[4949]: I0216 11:07:35.235013 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:07:35 crc kubenswrapper[4949]: E0216 11:07:35.235240 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6" Feb 16 11:07:35 crc kubenswrapper[4949]: I0216 11:07:35.293263 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:35 crc kubenswrapper[4949]: I0216 11:07:35.293306 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:35 crc kubenswrapper[4949]: I0216 11:07:35.293322 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:35 crc kubenswrapper[4949]: I0216 11:07:35.293343 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:35 crc kubenswrapper[4949]: I0216 11:07:35.293359 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:35Z","lastTransitionTime":"2026-02-16T11:07:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:35 crc kubenswrapper[4949]: I0216 11:07:35.327216 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-30 10:45:29.436501932 +0000 UTC Feb 16 11:07:35 crc kubenswrapper[4949]: I0216 11:07:35.397082 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:35 crc kubenswrapper[4949]: I0216 11:07:35.397135 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:35 crc kubenswrapper[4949]: I0216 11:07:35.397148 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:35 crc kubenswrapper[4949]: I0216 11:07:35.397168 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:35 crc kubenswrapper[4949]: I0216 11:07:35.397207 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:35Z","lastTransitionTime":"2026-02-16T11:07:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:35 crc kubenswrapper[4949]: I0216 11:07:35.499641 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:35 crc kubenswrapper[4949]: I0216 11:07:35.499690 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:35 crc kubenswrapper[4949]: I0216 11:07:35.499707 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:35 crc kubenswrapper[4949]: I0216 11:07:35.499731 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:35 crc kubenswrapper[4949]: I0216 11:07:35.499748 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:35Z","lastTransitionTime":"2026-02-16T11:07:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
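Has your network provider started?"}

The certificate_manager.go entries at 11:07:34, 11:07:35, and 11:07:36 report the same kubelet-serving expiration (2026-02-24 05:53:03 UTC) with a different rotation deadline each second. That is expected behavior: client-go's certificate manager recomputes a jittered deadline on every pass, placed (per the comments in its source, as best recalled here) at roughly 80 percent, plus or minus 10 percent, of the certificate's lifetime. Every deadline drawn in this log (2025-11-10, 2025-11-30, 2025-12-14) is already in the past against the node clock of 2026-02-16, so rotation is due immediately on each loop. A sketch of that jitter; the one-year lifetime is an assumption, since the log shows only the expiration:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// nextRotationDeadline mimics the jitter the kubelet's certificate manager
// applies when scheduling rotation: a random point at 80% +/- 10% of the
// certificate's total lifetime. The fraction is an assumption for
// illustration, not something printed in the log itself.
func nextRotationDeadline(notBefore, notAfter time.Time) time.Time {
	total := notAfter.Sub(notBefore)
	jittered := time.Duration(float64(total) * (0.7 + 0.2*rand.Float64()))
	return notBefore.Add(jittered)
}

func main() {
	// Expiration taken from the log; the issue time is assumed to be one
	// year earlier, which is consistent with the deadlines observed.
	notAfter := time.Date(2026, 2, 24, 5, 53, 3, 0, time.UTC)
	notBefore := notAfter.AddDate(-1, 0, 0)
	now := time.Date(2026, 2, 16, 11, 7, 35, 0, time.UTC) // node clock in the log
	for i := 0; i < 3; i++ {
		d := nextRotationDeadline(notBefore, notAfter)
		fmt.Printf("rotation deadline %s, already passed: %v\n", d.Format(time.RFC3339), d.Before(now))
	}
}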
Feb 16 11:07:35 crc kubenswrapper[4949]: I0216 11:07:35.601462 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:35 crc kubenswrapper[4949]: I0216 11:07:35.601488 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:35 crc kubenswrapper[4949]: I0216 11:07:35.601498 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:35 crc kubenswrapper[4949]: I0216 11:07:35.601510 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:35 crc kubenswrapper[4949]: I0216 11:07:35.601520 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:35Z","lastTransitionTime":"2026-02-16T11:07:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:35 crc kubenswrapper[4949]: I0216 11:07:35.704283 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:35 crc kubenswrapper[4949]: I0216 11:07:35.704603 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:35 crc kubenswrapper[4949]: I0216 11:07:35.704814 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:35 crc kubenswrapper[4949]: I0216 11:07:35.705237 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:35 crc kubenswrapper[4949]: I0216 11:07:35.705587 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:35Z","lastTransitionTime":"2026-02-16T11:07:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:35 crc kubenswrapper[4949]: I0216 11:07:35.808131 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:35 crc kubenswrapper[4949]: I0216 11:07:35.808497 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:35 crc kubenswrapper[4949]: I0216 11:07:35.808651 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:35 crc kubenswrapper[4949]: I0216 11:07:35.808823 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:35 crc kubenswrapper[4949]: I0216 11:07:35.809042 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:35Z","lastTransitionTime":"2026-02-16T11:07:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:35 crc kubenswrapper[4949]: I0216 11:07:35.911476 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:35 crc kubenswrapper[4949]: I0216 11:07:35.911578 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:35 crc kubenswrapper[4949]: I0216 11:07:35.911596 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:35 crc kubenswrapper[4949]: I0216 11:07:35.911621 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:35 crc kubenswrapper[4949]: I0216 11:07:35.911639 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:35Z","lastTransitionTime":"2026-02-16T11:07:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.013780 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.013811 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.013819 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.013831 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.013840 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:36Z","lastTransitionTime":"2026-02-16T11:07:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.116943 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.117248 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.117357 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.117475 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.117560 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:36Z","lastTransitionTime":"2026-02-16T11:07:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.220387 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.220448 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.220470 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.220496 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.220514 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:36Z","lastTransitionTime":"2026-02-16T11:07:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.234428 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.234430 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:07:36 crc kubenswrapper[4949]: E0216 11:07:36.234636 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:07:36 crc kubenswrapper[4949]: E0216 11:07:36.234750 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.234866 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:07:36 crc kubenswrapper[4949]: E0216 11:07:36.235456 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.236054 4949 scope.go:117] "RemoveContainer" containerID="b29470aea2f9de283595ee2dba6e70e842f131de5d8a942668a6a23f3caa9751" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.253269 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6v4x7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"965b4f20-8786-4c47-8721-c348942551d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktpql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktpql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:27Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6v4x7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:36Z is after 
2025-08-24T17:21:41Z" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.267967 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:36Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.281769 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:36Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.298137 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"39ca5ab7-457c-4404-a3eb-f6acce74843b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d3e706e8ad75d6c7419dc41073b0842faf175f1e153735a12e3ee7243676253\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":
\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4978ec698681f01249736b54aa5a1fba37fc94f3667b735c99bdfb97d3bd3380\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-26lss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:36Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.315628 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cs472" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75391841-4aa8-4f03-b696-23ac283642c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a92a94b385e42e72f4c48c4c2a8ddba220c077158d0f72e099e562d3b84bf70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kub
ernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bql55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3201fb16e33f256fad3eed7af0783c71ccb51ca75e6734c0745fada820853b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bql55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cs472\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:36Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.322443 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.322492 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.322509 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.322532 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.322553 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:36Z","lastTransitionTime":"2026-02-16T11:07:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.328118 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-10 19:50:42.089246777 +0000 UTC Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.336975 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ff1c788-4b66-48e9-8178-006f231d264c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87041b3d12c1149f97ab42c0edec7c62aaee2e50309ad8c236b1842a6164bf79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secre
ts/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"exit
Code\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vjrxd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2026-02-16T11:07:36Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.351326 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kvrsd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e1092c7-896f-4334-b157-ac007cdb0d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a5d6a2719dc30352250a07c1028c552b8f73c26d421b869f2e8ddc74fb9dcd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8v7hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:15Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kvrsd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:36Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.367032 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:36Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.379921 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kxn9z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc3f701c-2094-4818-871c-547fc5636a55\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://993e593b3f31361b71cff339c39e1bc66b6fbbeb8a1d125976254ec852dcd919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rld7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kxn9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:36Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.401353 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jsmls" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e42a398-f83a-4463-9ab7-4e093e80d744\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3e0579c66283751490a28d6f93146aa4b7a000219c17747fe2041a778fe2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7q7gk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jsmls\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:36Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.423003 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aada2690-8d5f-4854-bc83-59906010e8ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4e6265ce5aefd48ee356fb002a61e38aed11e42968db8945ffeaba1d7ee3c9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-16T11:07:11Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0216 11:07:05.819935 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0216 11:07:05.820709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2063297113/tls.crt::/tmp/serving-cert-2063297113/tls.key\\\\\\\"\\\\nI0216 11:07:11.242838 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0216 11:07:11.246739 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0216 11:07:11.246759 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0216 11:07:11.246780 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0216 11:07:11.246785 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0216 11:07:11.257100 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0216 11:07:11.257140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0216 11:07:11.257148 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0216 11:07:11.257148 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0216 11:07:11.257158 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0216 11:07:11.257195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0216 11:07:11.257203 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0216 11:07:11.257208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0216 11:07:11.259242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:36Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.425532 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.425565 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.425575 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.425592 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.425605 4949 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:36Z","lastTransitionTime":"2026-02-16T11:07:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.439773 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"abcc0a5c-3dfa-48fe-9df1-4f941d9d811c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afad26039cce493d107df9286cf3268dfc5f76d20b86bb34a36ef7742b8419bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68519f3c9bfc45ad28f92e2cf0c28a9413821784aafd91ab65a311259ed6ecf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9f247fc563e96b2d5c1e866afe8eef00f61520018ad001e6b02cffac286d3ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2296e3c50624ffcfe10eb50ae71f715af1868bfa777c60068d3ef55a2544af00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:36Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.454364 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1997fe9cefd6bfcd634b77dad76d5d7c814318127f2be93b489d7866808c57ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:36Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.474615 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c16c3ec1c264807a71bba82352d13478aabcbae9eee409569b1de207fd89131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6b376f8bceb83918f978008daff9cea37a309102416bb94a209bf286a7fe3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e903800152f2a61249dd315080aec7aff0fb0f96a2d687f4903ee211ac061080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b36100d71aa9c4c492c12006bd25f19aa6f8e8b39048b6e895acbadf3a3d5b5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6614345cd2b9b48d07439200086ab97d851abf34d3fea2b29a5073f224184ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c30142bc1aa2477b0821702607a40da4bd125f14ce8bdd4d6a555481f4be477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b29470aea2f9de283595ee2dba6e70e842f131de
5d8a942668a6a23f3caa9751\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b29470aea2f9de283595ee2dba6e70e842f131de5d8a942668a6a23f3caa9751\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-16T11:07:24Z\\\",\\\"message\\\":\\\"0216 11:07:24.662034 6375 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:24Z is after 2025-08-24T17:21:41Z]\\\\nI0216 11:07:24.661977 6375 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-ingress-canary/ingress-canary\\\\\\\"}\\\\nI0216 11:07:24.662021 6375 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-cluster-version/cluster-version-operator]} name:Service_openshift-cluster-version/cluster-version-operator_TCP_cluster options:{GoMap:map[event:false h\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:23Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-gfr2q_openshift-ovn-kubernetes(3f545ae8-1b14-4abd-b4ea-844f6ae7b54d)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47947db986c5433367c6b52570a017def2e4babf72f785111587ca86fd745795\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gfr2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:36Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.488706 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://317e85af8628e3ae9d104c6faacfd4ce6b66adc7d3f0e426433f60118fffa894\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://43a9246b1c43b2bfb063c1d8aded507f620472bb98ffc99e91115e1189478807\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17
b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:36Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.499905 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eff651eda112458195ec8e6bc6ecdf362f44a44f7f8be136f631153251f278c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:36Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:36 crc kubenswrapper[4949]: 
I0216 11:07:36.527784 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.527816 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.527824 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.527838 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.527847 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:36Z","lastTransitionTime":"2026-02-16T11:07:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.630081 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.630146 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.630165 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.630215 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.630231 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:36Z","lastTransitionTime":"2026-02-16T11:07:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.732334 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.732386 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.732394 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.732408 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.732418 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:36Z","lastTransitionTime":"2026-02-16T11:07:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.834813 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.834847 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.834856 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.834870 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.834878 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:36Z","lastTransitionTime":"2026-02-16T11:07:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.905579 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gfr2q_3f545ae8-1b14-4abd-b4ea-844f6ae7b54d/ovnkube-controller/1.log" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.908133 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" event={"ID":"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d","Type":"ContainerStarted","Data":"522f14e08f7f310ebb174df27f8bc434d591ff3579ba4c5f1d569497cdef4a11"} Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.908641 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.921734 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://317e85af8628e3ae9d104c6faacfd4ce6b66adc7d3f0e426433f60118fffa894\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://43a9246b1c43b2bfb063c1d8aded507f620472bb98ffc99e91115e1189478807\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:36Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.932339 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eff651eda112458195ec8e6bc6ecdf362f44a44f7f8be136f631153251f278c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:36Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.938752 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.939001 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.939084 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.939232 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.939321 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:36Z","lastTransitionTime":"2026-02-16T11:07:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.957772 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c16c3ec1c264807a71bba82352d13478aabcbae9eee409569b1de207fd89131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6b376f8bceb83918f978008daff9cea37a309102416bb94a209bf286a7fe3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://e903800152f2a61249dd315080aec7aff0fb0f96a2d687f4903ee211ac061080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b36100d71aa9c4c492c12006bd25f19aa6f8e8b39048b6e895acbadf3a3d5b5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6614345cd2b9b48d07439200086ab97d851abf34d3fea2b29a5073f224184ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c30142bc1aa2477b0821702607a40da4bd125f14ce8bdd4d6a555481f4be477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://522f14e08f7f310ebb174df27f8bc434d591ff3579ba4c5f1d569497cdef4a11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b29470aea2f9de283595ee2dba6e70e842f131de5d8a942668a6a23f3caa9751\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-16T11:07:24Z\\\",\\\"message\\\":\\\"0216 11:07:24.662034 6375 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:24Z is after 2025-08-24T17:21:41Z]\\\\nI0216 11:07:24.661977 6375 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-ingress-canary/ingress-canary\\\\\\\"}\\\\nI0216 11:07:24.662021 6375 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-cluster-version/cluster-version-operator]} name:Service_openshift-cluster-version/cluster-version-operator_TCP_cluster options:{GoMap:map[event:false 
h\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:23Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47947db986c5433367c6b52570a017def2e4babf72f785111587ca86fd745795\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"cont
ainerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gfr2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:36Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.972360 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:36Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.984425 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"39ca5ab7-457c-4404-a3eb-f6acce74843b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d3e706e8ad75d6c7419dc41073b0842faf175f1e153735a12e3ee7243676253\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4978ec698681f01249736b54aa5a1fba37fc94f3667b735c99bdfb97d3bd3380\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-26lss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:36Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:36 crc kubenswrapper[4949]: I0216 11:07:36.997042 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cs472" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75391841-4aa8-4f03-b696-23ac283642c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a92a94b385e42e72f4c48c4c2a8ddba220c077158d0f72e099e562d3b84bf70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bql55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3201fb16e33f256fad3eed7af0783c71ccb51ca75e6734c0745fada820853b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d77325
7453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bql55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cs472\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:36Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.006333 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6v4x7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"965b4f20-8786-4c47-8721-c348942551d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktpql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktpql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:27Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6v4x7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:37Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.016627 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:37Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.026997 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kvrsd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e1092c7-896f-4334-b157-ac007cdb0d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a5d6a2719dc30352250a07c1028c552b8f73c26d421b869f2e8ddc74fb9dcd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8v7hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:15Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kvrsd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:37Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.041786 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.041835 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.041849 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.041869 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.041880 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:37Z","lastTransitionTime":"2026-02-16T11:07:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.042420 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ff1c788-4b66-48e9-8178-006f231d264c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87041b3d12c1149f97ab42c0edec7c62aaee2e50309ad8c236b1842a6164bf79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"image\\\":\\\"quay.io/
openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/hos
t/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0
4f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vjrxd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:37Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.054090 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aada2690-8d5f-4854-bc83-59906010e8ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4e6265ce5aefd48ee356fb002a61e38aed11e42968db8945ffeaba1d7ee3c9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-16T11:07:11Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0216 11:07:05.819935 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0216 11:07:05.820709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2063297113/tls.crt::/tmp/serving-cert-2063297113/tls.key\\\\\\\"\\\\nI0216 11:07:11.242838 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0216 11:07:11.246739 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0216 11:07:11.246759 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0216 11:07:11.246780 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0216 11:07:11.246785 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0216 11:07:11.257100 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0216 11:07:11.257140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0216 11:07:11.257148 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0216 11:07:11.257148 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0216 11:07:11.257158 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0216 11:07:11.257195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0216 11:07:11.257203 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0216 11:07:11.257208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0216 11:07:11.259242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:37Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.064868 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"abcc0a5c-3dfa-48fe-9df1-4f941d9d811c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afad26039cce493d107df9286cf3268dfc5f76d20b86bb34a36ef7742b8419bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68519f3c9bfc45ad28f92e2cf0c28a9413821784aafd91ab65a311259ed6ecf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9f247fc563e96b2d5c1e866afe8eef00f61520018ad001e6b02cffac286d3ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2296e3c50624ffcfe10eb50ae71f715af1868bfa777c60068d3ef55a2544af00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:37Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.080848 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1997fe9cefd6bfcd634b77dad76d5d7c814318127f2be93b489d7866808c57ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:37Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.091255 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:37Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.102915 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kxn9z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc3f701c-2094-4818-871c-547fc5636a55\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://993e593b3f31361b71cff339c39e1bc66b6fbbeb8a1d125976254ec852dcd919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rld7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kxn9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:37Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.113811 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jsmls" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e42a398-f83a-4463-9ab7-4e093e80d744\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3e0579c66283751490a28d6f93146aa4b7a000219c17747fe2041a778fe2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7q7gk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jsmls\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:37Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.144349 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.144380 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.144390 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.144404 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.144414 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:37Z","lastTransitionTime":"2026-02-16T11:07:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.235137 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:07:37 crc kubenswrapper[4949]: E0216 11:07:37.235410 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.246367 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.246406 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.246414 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.246429 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.246438 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:37Z","lastTransitionTime":"2026-02-16T11:07:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.328463 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-30 04:59:31.268827407 +0000 UTC Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.348278 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.348325 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.348336 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.348351 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.348361 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:37Z","lastTransitionTime":"2026-02-16T11:07:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.450918 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.450963 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.450977 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.450997 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.451010 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:37Z","lastTransitionTime":"2026-02-16T11:07:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.553628 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.553673 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.553688 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.553710 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.553727 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:37Z","lastTransitionTime":"2026-02-16T11:07:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.655823 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.655873 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.655889 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.655908 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.655922 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:37Z","lastTransitionTime":"2026-02-16T11:07:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.759054 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.759113 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.759134 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.759159 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.759213 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:37Z","lastTransitionTime":"2026-02-16T11:07:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.861670 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.861738 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.861761 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.861791 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.861814 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:37Z","lastTransitionTime":"2026-02-16T11:07:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.914546 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gfr2q_3f545ae8-1b14-4abd-b4ea-844f6ae7b54d/ovnkube-controller/2.log" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.915503 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gfr2q_3f545ae8-1b14-4abd-b4ea-844f6ae7b54d/ovnkube-controller/1.log" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.919197 4949 generic.go:334] "Generic (PLEG): container finished" podID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerID="522f14e08f7f310ebb174df27f8bc434d591ff3579ba4c5f1d569497cdef4a11" exitCode=1 Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.919233 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" event={"ID":"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d","Type":"ContainerDied","Data":"522f14e08f7f310ebb174df27f8bc434d591ff3579ba4c5f1d569497cdef4a11"} Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.919268 4949 scope.go:117] "RemoveContainer" containerID="b29470aea2f9de283595ee2dba6e70e842f131de5d8a942668a6a23f3caa9751" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.920062 4949 scope.go:117] "RemoveContainer" containerID="522f14e08f7f310ebb174df27f8bc434d591ff3579ba4c5f1d569497cdef4a11" Feb 16 11:07:37 crc kubenswrapper[4949]: E0216 11:07:37.920290 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-gfr2q_openshift-ovn-kubernetes(3f545ae8-1b14-4abd-b4ea-844f6ae7b54d)\"" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.941122 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jsmls" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e42a398-f83a-4463-9ab7-4e093e80d744\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3e0579c66283751490a28d6f93146aa4b7a000219c17747fe2041a778fe2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7q7gk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jsmls\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:37Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.960162 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aada2690-8d5f-4854-bc83-59906010e8ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4e6265ce5aefd48ee356fb002a61e38aed11e42968db8945ffeaba1d7ee3c9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-16T11:07:11Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0216 11:07:05.819935 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0216 11:07:05.820709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2063297113/tls.crt::/tmp/serving-cert-2063297113/tls.key\\\\\\\"\\\\nI0216 11:07:11.242838 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0216 11:07:11.246739 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0216 11:07:11.246759 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0216 11:07:11.246780 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0216 11:07:11.246785 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0216 11:07:11.257100 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0216 11:07:11.257140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0216 11:07:11.257148 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0216 11:07:11.257148 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0216 11:07:11.257158 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0216 11:07:11.257195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0216 11:07:11.257203 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0216 11:07:11.257208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0216 11:07:11.259242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:37Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.964309 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.964379 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.964394 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.964412 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.964423 4949 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:37Z","lastTransitionTime":"2026-02-16T11:07:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.980710 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"abcc0a5c-3dfa-48fe-9df1-4f941d9d811c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afad26039cce493d107df9286cf3268dfc5f76d20b86bb34a36ef7742b8419bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68519f3c9bfc45ad28f92e2cf0c28a9413821784aafd91ab65a311259ed6ecf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9f247fc563e96b2d5c1e866afe8eef00f61520018ad001e6b02cffac286d3ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2296e3c50624ffcfe10eb50ae71f715af1868bfa777c60068d3ef55a2544af00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:37Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:37 crc kubenswrapper[4949]: I0216 11:07:37.998679 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1997fe9cefd6bfcd634b77dad76d5d7c814318127f2be93b489d7866808c57ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:37Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.015741 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:38Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.029308 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kxn9z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc3f701c-2094-4818-871c-547fc5636a55\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://993e593b3f31361b71cff339c39e1bc66b6fbbeb8a1d125976254ec852dcd919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rld7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kxn9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:38Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.045822 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://317e85af8628e3ae9d104c6faacfd4ce6b66adc7d3f0e426433f60118fffa894\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://43a9246b1c43b2bfb063c1d8aded507f620472bb98ffc99e91115e1189478807\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:38Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.062507 
4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eff651eda112458195ec8e6bc6ecdf362f44a44f7f8be136f631153251f278c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:38Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.071654 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.071741 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.071763 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.071806 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.071824 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:38Z","lastTransitionTime":"2026-02-16T11:07:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.074895 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.074932 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.074944 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.074963 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.074976 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:38Z","lastTransitionTime":"2026-02-16T11:07:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:38 crc kubenswrapper[4949]: E0216 11:07:38.093635 4949 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc77a723-71f9-4f4a-b80e-2feb50c63f04\\\",\\\"systemUUID\\\":\\\"fcf7eef6-e236-4c8e-bd9c-41b70a7621ed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:38Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.096483 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c16c3ec1c264807a71bba82352d13478aabcbae9eee409569b1de207fd89131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6b376f8bceb83918f978008daff9cea37a309102416bb94a209bf286a7fe3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e903800152f2a61249dd315080aec7aff0fb0f96a2d687f4903ee211ac061080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b36100d71aa9c4c492c12006bd25f19aa6f8e8b39048b6e895acbadf3a3d5b5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6614345cd2b9b48d07439200086ab97d851abf34d3fea2b29a5073f224184ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c30142bc1aa2477b0821702607a40da4bd125f14ce8bdd4d6a555481f4be477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://522f14e08f7f310ebb174df27f8bc434d591ff3579ba4c5f1d569497cdef4a11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b29470aea2f9de283595ee2dba6e70e842f131de5d8a942668a6a23f3caa9751\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-16T11:07:24Z\\\",\\\"message\\\":\\\"0216 11:07:24.662034 6375 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:24Z is after 2025-08-24T17:21:41Z]\\\\nI0216 11:07:24.661977 6375 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-ingress-canary/ingress-canary\\\\\\\"}\\\\nI0216 11:07:24.662021 6375 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-cluster-version/cluster-version-operator]} name:Service_openshift-cluster-version/cluster-version-operator_TCP_cluster options:{GoMap:map[event:false h\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:23Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://522f14e08f7f310ebb174df27f8bc434d591ff3579ba4c5f1d569497cdef4a11\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-16T11:07:37Z\\\",\\\"message\\\":\\\"07:37.036667 6594 default_network_controller.go:776] Recording success event on pod 
openshift-image-registry/node-ca-kvrsd\\\\nI0216 11:07:37.036668 6594 factory.go:656] Stopping watch factory\\\\nI0216 11:07:37.036649 6594 obj_retry.go:303] Retry object setup: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0216 11:07:37.036677 6594 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0216 11:07:37.036680 6594 ovnkube.go:599] Stopped ovnkube\\\\nI0216 11:07:37.036681 6594 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nI0216 11:07:37.036687 6594 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI0216 11:07:37.036691 6594 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0216 11:07:37.036698 6594 obj_retry.go:420] Function iterateRetryResources for *v1.Pod ended (in 165.675µs)\\\\nI0216 11:07:37.036700 6594 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0216 11:07:37.036709 6594 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0216 11:07:37.036760 6594 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47947db986c5433367c6b52570a017def2e4babf72f785111587ca86fd745795\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gfr2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:38Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.097482 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.097551 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.097571 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.097597 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.097616 4949 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:38Z","lastTransitionTime":"2026-02-16T11:07:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:38 crc kubenswrapper[4949]: E0216 11:07:38.111686 4949 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc77a723-71f9-4f4a-b80e-2feb50c63f04\\\",\\\"systemUUID\\\":\\\"fcf7eef6-e236-4c8e-bd9c-41b70a7621ed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:38Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.113175 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:38Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.115097 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.115127 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.115136 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.115150 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.115159 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:38Z","lastTransitionTime":"2026-02-16T11:07:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.126030 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:38Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:38 crc kubenswrapper[4949]: E0216 11:07:38.128277 4949 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:38Z\\\",\\\"message\\\":\\\"kubelet has no disk 
pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeByt
es\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-a
rt-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc77a723-71f9-4f4a-b80e-2feb50c63f04\\\",\\\"systemUUID\\\":\\\"f
cf7eef6-e236-4c8e-bd9c-41b70a7621ed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:38Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.131275 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.131384 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.131459 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.131525 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.131580 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:38Z","lastTransitionTime":"2026-02-16T11:07:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.137851 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"39ca5ab7-457c-4404-a3eb-f6acce74843b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d3e706e8ad75d6c7419dc41073b0842faf175f1e153735a12e3ee7243676253\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4978ec698681f01249736b54aa5a1fba37fc94f3667b735c99bdfb97d3bd3380\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-26lss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:38Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:38 crc kubenswrapper[4949]: E0216 11:07:38.143331 4949 kubelet_node_status.go:585] "Error 
updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256
:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"si
zeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":46317936
5},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc77a723-71f9-4f4a-b80e-2feb50c63f04\\\",\\\"systemUUID\\\":\\\"fcf7eef6-e236-4c8e-bd9c-41b70a7621ed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:38Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.147219 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.147352 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.147462 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.147554 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.147646 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:38Z","lastTransitionTime":"2026-02-16T11:07:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.149356 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cs472" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75391841-4aa8-4f03-b696-23ac283642c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a92a94b385e42e72f4c48c4c2a8ddba220c077158d0f72e099e562d3b84bf70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bql55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3201fb16e33f256fad3eed7af0783c71ccb51ca75e6734c0745fada820853b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bql55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cs472\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:38Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.162063 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6v4x7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"965b4f20-8786-4c47-8721-c348942551d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktpql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktpql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:27Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6v4x7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:38Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:38 crc 
kubenswrapper[4949]: E0216 11:07:38.164858 4949 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider 
started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshif
t-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d
34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc77a723-71f9-4f4a-b80e-2feb50c63f04\\\",\\\"systemUUID\\\":\\\"fcf7eef6-e236-4c8e-bd9c-41b70a7621ed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:38Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:38 crc kubenswrapper[4949]: E0216 11:07:38.165292 4949 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.174217 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:38 
crc kubenswrapper[4949]: I0216 11:07:38.174250 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.174262 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.174280 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.174291 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:38Z","lastTransitionTime":"2026-02-16T11:07:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.179038 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ff1c788-4b66-48e9-8178-006f231d264c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87041b3d12c1149f97ab42c0edec7c62aaee2e50309ad8c236b1842a6164bf79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-co
py\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOn
ly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"m
ountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vjrxd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:38Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.189396 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kvrsd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e1092c7-896f-4334-b157-ac007cdb0d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a5d6a2719dc30352250a07c1028c552b8f73c26d421b869f2e8ddc74fb9dcd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8v7hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:15Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kvrsd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:38Z is after 2025-08-24T17:21:41Z" 
Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.234982 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.234982 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Feb 16 11:07:38 crc kubenswrapper[4949]: E0216 11:07:38.235119 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.235012 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Feb 16 11:07:38 crc kubenswrapper[4949]: E0216 11:07:38.235206 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Feb 16 11:07:38 crc kubenswrapper[4949]: E0216 11:07:38.235274 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.277174 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.277241 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.277254 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.277269 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.277279 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:38Z","lastTransitionTime":"2026-02-16T11:07:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.329445 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-07 20:31:26.343655517 +0000 UTC
Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.379542 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.379596 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.379619 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.379648 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.379670 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:38Z","lastTransitionTime":"2026-02-16T11:07:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.482280 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.482345 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.482367 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.482395 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.482419 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:38Z","lastTransitionTime":"2026-02-16T11:07:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.584459 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.584489 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.584499 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.584511 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.584519 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:38Z","lastTransitionTime":"2026-02-16T11:07:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.686993 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.687073 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.687103 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.687141 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.687164 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:38Z","lastTransitionTime":"2026-02-16T11:07:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.790214 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.790276 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.790291 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.790307 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.790318 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:38Z","lastTransitionTime":"2026-02-16T11:07:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.892061 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.892091 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.892099 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.892112 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.892120 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:38Z","lastTransitionTime":"2026-02-16T11:07:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.923154 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gfr2q_3f545ae8-1b14-4abd-b4ea-844f6ae7b54d/ovnkube-controller/2.log"
Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.926408 4949 scope.go:117] "RemoveContainer" containerID="522f14e08f7f310ebb174df27f8bc434d591ff3579ba4c5f1d569497cdef4a11"
Feb 16 11:07:38 crc kubenswrapper[4949]: E0216 11:07:38.926606 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-gfr2q_openshift-ovn-kubernetes(3f545ae8-1b14-4abd-b4ea-844f6ae7b54d)\"" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d"
Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.944460 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://317e85af8628e3ae9d104c6faacfd4ce6b66adc7d3f0e426433f60118fffa894\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://43a9246b1c43b2bfb063c1d8aded507f620472bb98ffc99e91115e1189478807\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:38Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.962818 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eff651eda112458195ec8e6bc6ecdf362f44a44f7f8be136f631153251f278c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:38Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.986997 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c16c3ec1c264807a71bba82352d13478aabcbae9eee409569b1de207fd89131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6b376f8bceb83918f978008daff9cea37a309102416bb94a209bf286a7fe3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e903800152f2a61249dd315080aec7aff0fb0f96a2d687f4903ee211ac061080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b36100d71aa9c4c492c12006bd25f19aa6f8e8b39048b6e895acbadf3a3d5b5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6614345cd2b9b48d07439200086ab97d851abf34d3fea2b29a5073f224184ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c30142bc1aa2477b0821702607a40da4bd125f14ce8bdd4d6a555481f4be477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://522f14e08f7f310ebb174df27f8bc434d591ff35
79ba4c5f1d569497cdef4a11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://522f14e08f7f310ebb174df27f8bc434d591ff3579ba4c5f1d569497cdef4a11\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-16T11:07:37Z\\\",\\\"message\\\":\\\"07:37.036667 6594 default_network_controller.go:776] Recording success event on pod openshift-image-registry/node-ca-kvrsd\\\\nI0216 11:07:37.036668 6594 factory.go:656] Stopping watch factory\\\\nI0216 11:07:37.036649 6594 obj_retry.go:303] Retry object setup: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0216 11:07:37.036677 6594 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0216 11:07:37.036680 6594 ovnkube.go:599] Stopped ovnkube\\\\nI0216 11:07:37.036681 6594 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nI0216 11:07:37.036687 6594 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI0216 11:07:37.036691 6594 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0216 11:07:37.036698 6594 obj_retry.go:420] Function iterateRetryResources for *v1.Pod ended (in 165.675µs)\\\\nI0216 11:07:37.036700 6594 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0216 11:07:37.036709 6594 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0216 11:07:37.036760 6594 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-gfr2q_openshift-ovn-kubernetes(3f545ae8-1b14-4abd-b4ea-844f6ae7b54d)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47947db986c5433367c6b52570a017def2e4babf72f785111587ca86fd745795\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gfr2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:38Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.995019 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.995074 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.995092 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.995117 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:38 crc kubenswrapper[4949]: I0216 11:07:38.995134 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:38Z","lastTransitionTime":"2026-02-16T11:07:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.000708 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:38Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.011905 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:39Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.025920 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"39ca5ab7-457c-4404-a3eb-f6acce74843b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d3e706e8ad75d6c7419dc41073b0842faf175f1e153735a12e3ee7243676253\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":
\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4978ec698681f01249736b54aa5a1fba37fc94f3667b735c99bdfb97d3bd3380\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-26lss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:39Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.037071 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cs472" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75391841-4aa8-4f03-b696-23ac283642c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a92a94b385e42e72f4c48c4c2a8ddba220c077158d0f72e099e562d3b84bf70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kub
ernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bql55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3201fb16e33f256fad3eed7af0783c71ccb51ca75e6734c0745fada820853b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bql55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cs472\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:39Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.048719 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6v4x7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"965b4f20-8786-4c47-8721-c348942551d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktpql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktpql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:27Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6v4x7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:39Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.061968 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ff1c788-4b66-48e9-8178-006f231d264c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87041b3d12c1149f97ab42c0edec7c62aaee2e50309ad8c236b1842a6164bf79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vjrxd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:39Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.071634 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kvrsd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e1092c7-896f-4334-b157-ac007cdb0d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a5d6a2719dc30352250a07c1028c552b8f73c26d421b869f2e8ddc74fb9dcd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8v7hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:15Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kvrsd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:39Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.091119 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aada2690-8d5f-4854-bc83-59906010e8ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with 
unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4e6265ce5aefd48ee356fb002a61e38aed11e42968db8945ffeaba1d7ee3c9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-16T11:07:11Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0216 11:07:05.819935 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0216 11:07:05.820709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2063297113/tls.crt::/tmp/serving-cert-2063297113/tls.key\\\\\\\"\\\\nI0216 11:07:11.242838 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0216 11:07:11.246739 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0216 11:07:11.246759 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0216 11:07:11.246780 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0216 11:07:11.246785 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0216 11:07:11.257100 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0216 11:07:11.257140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0216 11:07:11.257148 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0216 11:07:11.257148 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0216 11:07:11.257158 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0216 11:07:11.257195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0216 11:07:11.257203 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0216 11:07:11.257208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0216 11:07:11.259242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:39Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.097621 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.097703 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.097730 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.097761 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.097784 4949 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:39Z","lastTransitionTime":"2026-02-16T11:07:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.109151 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"abcc0a5c-3dfa-48fe-9df1-4f941d9d811c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afad26039cce493d107df9286cf3268dfc5f76d20b86bb34a36ef7742b8419bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68519f3c9bfc45ad28f92e2cf0c28a9413821784aafd91ab65a311259ed6ecf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9f247fc563e96b2d5c1e866afe8eef00f61520018ad001e6b02cffac286d3ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2296e3c50624ffcfe10eb50ae71f715af1868bfa777c60068d3ef55a2544af00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:39Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.123503 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1997fe9cefd6bfcd634b77dad76d5d7c814318127f2be93b489d7866808c57ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:39Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.139906 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:39Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.152017 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kxn9z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc3f701c-2094-4818-871c-547fc5636a55\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://993e593b3f31361b71cff339c39e1bc66b6fbbeb8a1d125976254ec852dcd919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rld7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kxn9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:39Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.169532 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jsmls" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e42a398-f83a-4463-9ab7-4e093e80d744\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3e0579c66283751490a28d6f93146aa4b7a000219c17747fe2041a778fe2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7q7gk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jsmls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:39Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.200089 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.200115 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.200122 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.200136 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.200146 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:39Z","lastTransitionTime":"2026-02-16T11:07:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.235085 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:07:39 crc kubenswrapper[4949]: E0216 11:07:39.235297 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6" Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.302642 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.303064 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.303254 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.303399 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.303589 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:39Z","lastTransitionTime":"2026-02-16T11:07:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.330475 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-11 23:52:27.785705311 +0000 UTC Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.406891 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.406940 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.406953 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.406973 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.406990 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:39Z","lastTransitionTime":"2026-02-16T11:07:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.510102 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.510161 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.510207 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.510231 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.510249 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:39Z","lastTransitionTime":"2026-02-16T11:07:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.613248 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.613312 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.613329 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.613355 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.613372 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:39Z","lastTransitionTime":"2026-02-16T11:07:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.715960 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.716024 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.716042 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.716067 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.716086 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:39Z","lastTransitionTime":"2026-02-16T11:07:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.819475 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.819802 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.820168 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.820339 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.820480 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:39Z","lastTransitionTime":"2026-02-16T11:07:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.922058 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.922316 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.922588 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.922781 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:39 crc kubenswrapper[4949]: I0216 11:07:39.922870 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:39Z","lastTransitionTime":"2026-02-16T11:07:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.025829 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.025860 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.025871 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.025885 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.025894 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:40Z","lastTransitionTime":"2026-02-16T11:07:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.129572 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.129647 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.129671 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.129704 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.129723 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:40Z","lastTransitionTime":"2026-02-16T11:07:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.232818 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.232880 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.232897 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.232922 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.232942 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:40Z","lastTransitionTime":"2026-02-16T11:07:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.235218 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.235279 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:07:40 crc kubenswrapper[4949]: E0216 11:07:40.235381 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.235238 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:07:40 crc kubenswrapper[4949]: E0216 11:07:40.235551 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:07:40 crc kubenswrapper[4949]: E0216 11:07:40.235700 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.331142 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-27 02:13:27.683968408 +0000 UTC Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.334686 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.334763 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.334781 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.334811 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.334834 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:40Z","lastTransitionTime":"2026-02-16T11:07:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.441472 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.441928 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.442138 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.442402 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.442587 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:40Z","lastTransitionTime":"2026-02-16T11:07:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.545493 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.545569 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.545593 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.545622 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.545642 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:40Z","lastTransitionTime":"2026-02-16T11:07:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.648820 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.648880 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.648897 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.648919 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.648936 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:40Z","lastTransitionTime":"2026-02-16T11:07:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.751273 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.751662 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.751802 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.751951 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.752068 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:40Z","lastTransitionTime":"2026-02-16T11:07:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.860842 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.860884 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.860895 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.860910 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.860921 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:40Z","lastTransitionTime":"2026-02-16T11:07:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.963781 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.963837 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.963854 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.963880 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:40 crc kubenswrapper[4949]: I0216 11:07:40.963901 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:40Z","lastTransitionTime":"2026-02-16T11:07:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.066535 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.066580 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.066589 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.066602 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.066613 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:41Z","lastTransitionTime":"2026-02-16T11:07:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.169245 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.169281 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.169289 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.169303 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.169312 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:41Z","lastTransitionTime":"2026-02-16T11:07:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.235132 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:07:41 crc kubenswrapper[4949]: E0216 11:07:41.235529 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6" Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.250831 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:41Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.265727 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:41Z is after 2025-08-24T17:21:41Z"
Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.271526 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.271558 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.271568 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.271583 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.271595 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:41Z","lastTransitionTime":"2026-02-16T11:07:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.277224 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"39ca5ab7-457c-4404-a3eb-f6acce74843b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d3e706e8ad75d6c7419dc41073b0842faf175f1e153735a12e3ee7243676253\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4978ec698681f01249736b54aa5a1fba37fc94f3667b735c99bdfb97d3bd3380\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-26lss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:41Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.287816 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cs472" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75391841-4aa8-4f03-b696-23ac283642c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a92a94b385e42e72f4c48c4c2a8ddba220c077158d0f72e099e562d3b84bf70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bql55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3201fb16e33f256fad3eed7af0783c71ccb51ca75e6734c0745fada820853b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bql55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:
25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cs472\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:41Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.299622 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6v4x7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"965b4f20-8786-4c47-8721-c348942551d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktpql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktpql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:27Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6v4x7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:41Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.317784 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ff1c788-4b66-48e9-8178-006f231d264c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87041b3d12c1149f97ab42c0edec7c62aaee2e50309ad8c236b1842a6164bf79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"c
ontainerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:18Z\\\"}},\\\"volumeMo
unts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vjrxd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:41Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:41 crc kubenswrapper[4949]: 
I0216 11:07:41.328628 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kvrsd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e1092c7-896f-4334-b157-ac007cdb0d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a5d6a2719dc30352250a07c1028c552b8f73c26d421b869f2e8ddc74fb9dcd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8v7hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:15Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kvrsd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:41Z is after 2025-08-24T17:21:41Z"
Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.331238 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-09 18:32:12.983137443 +0000 UTC
Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.343762 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kxn9z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc3f701c-2094-4818-871c-547fc5636a55\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://993e593b3f31361b71cff339c39e1bc66b6fbbeb8a1d125976254ec852dcd919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rld7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kxn9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:41Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.361198 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jsmls" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e42a398-f83a-4463-9ab7-4e093e80d744\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3e0579c66283751490a28d6f93146aa4b7a000219c17747fe2041a778fe2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7q7gk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jsmls\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:41Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.374559 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.374582 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.374591 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.374604 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.374614 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:41Z","lastTransitionTime":"2026-02-16T11:07:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.375681 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aada2690-8d5f-4854-bc83-59906010e8ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4e6265ce5aefd48ee356fb002a61e38aed11e42968db8945ffeaba1d7ee3c9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-16T11:07:11Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0216 11:07:05.819935 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0216 11:07:05.820709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2063297113/tls.crt::/tmp/serving-cert-2063297113/tls.key\\\\\\\"\\\\nI0216 11:07:11.242838 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0216 11:07:11.246739 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0216 11:07:11.246759 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0216 11:07:11.246780 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0216 11:07:11.246785 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0216 11:07:11.257100 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0216 11:07:11.257140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0216 11:07:11.257148 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0216 11:07:11.257148 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0216 11:07:11.257158 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0216 11:07:11.257195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0216 11:07:11.257203 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0216 11:07:11.257208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0216 11:07:11.259242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:41Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.387996 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"abcc0a5c-3dfa-48fe-9df1-4f941d9d811c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afad26039cce493d107df9286cf3268dfc5f76d20b86bb34a36ef7742b8419bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68519f3c9bfc45ad28f92e2cf0c28a9413821784aafd91ab65a311259ed6ecf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9f247fc563e96b2d5c1e866afe8eef00f61520018ad001e6b02cffac286d3ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2296e3c50624ffcfe10eb50ae71f715af1868bfa777c60068d3ef55a2544af00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:41Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.403590 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1997fe9cefd6bfcd634b77dad76d5d7c814318127f2be93b489d7866808c57ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:41Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.420486 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:41Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.445797 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://317e85af8628e3ae9d104c6faacfd4ce6b66adc7d3f0e426433f60118fffa894\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://43a9246b1c43b2bfb063c1d8aded507f620472bb98ffc99e91115e1189478807\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:41Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.457003 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eff651eda112458195ec8e6bc6ecdf362f44a44f7f8be136f631153251f278c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:41Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.474537 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c16c3ec1c264807a71bba82352d13478aabcbae9eee409569b1de207fd89131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6b376f8bceb83918f978008daff9cea37a309102416bb94a209bf286a7fe3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e903800152f2a61249dd315080aec7aff0fb0f96a2d687f4903ee211ac061080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b36100d71aa9c4c492c12006bd25f19aa6f8e8b39048b6e895acbadf3a3d5b5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6614345cd2b9b48d07439200086ab97d851abf34d3fea2b29a5073f224184ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c30142bc1aa2477b0821702607a40da4bd125f14ce8bdd4d6a555481f4be477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://522f14e08f7f310ebb174df27f8bc434d591ff35
79ba4c5f1d569497cdef4a11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://522f14e08f7f310ebb174df27f8bc434d591ff3579ba4c5f1d569497cdef4a11\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-16T11:07:37Z\\\",\\\"message\\\":\\\"07:37.036667 6594 default_network_controller.go:776] Recording success event on pod openshift-image-registry/node-ca-kvrsd\\\\nI0216 11:07:37.036668 6594 factory.go:656] Stopping watch factory\\\\nI0216 11:07:37.036649 6594 obj_retry.go:303] Retry object setup: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0216 11:07:37.036677 6594 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0216 11:07:37.036680 6594 ovnkube.go:599] Stopped ovnkube\\\\nI0216 11:07:37.036681 6594 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nI0216 11:07:37.036687 6594 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI0216 11:07:37.036691 6594 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0216 11:07:37.036698 6594 obj_retry.go:420] Function iterateRetryResources for *v1.Pod ended (in 165.675µs)\\\\nI0216 11:07:37.036700 6594 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0216 11:07:37.036709 6594 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0216 11:07:37.036760 6594 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-gfr2q_openshift-ovn-kubernetes(3f545ae8-1b14-4abd-b4ea-844f6ae7b54d)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47947db986c5433367c6b52570a017def2e4babf72f785111587ca86fd745795\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gfr2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:41Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.476242 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.476285 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.476294 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.476308 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.476317 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:41Z","lastTransitionTime":"2026-02-16T11:07:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.579267 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.579315 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.579328 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.579346 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.579357 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:41Z","lastTransitionTime":"2026-02-16T11:07:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.681790 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.681870 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.681896 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.681928 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.681947 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:41Z","lastTransitionTime":"2026-02-16T11:07:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.784304 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.784352 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.784363 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.784379 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.784390 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:41Z","lastTransitionTime":"2026-02-16T11:07:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.886254 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.886303 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.886318 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.886343 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.886359 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:41Z","lastTransitionTime":"2026-02-16T11:07:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.988986 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.989057 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.989079 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.989112 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:41 crc kubenswrapper[4949]: I0216 11:07:41.989137 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:41Z","lastTransitionTime":"2026-02-16T11:07:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.092029 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.092075 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.092091 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.092114 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.092132 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:42Z","lastTransitionTime":"2026-02-16T11:07:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.194974 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.195090 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.195118 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.195154 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.195232 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:42Z","lastTransitionTime":"2026-02-16T11:07:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.234732 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.234769 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.234752 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:07:42 crc kubenswrapper[4949]: E0216 11:07:42.234902 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:07:42 crc kubenswrapper[4949]: E0216 11:07:42.235051 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:07:42 crc kubenswrapper[4949]: E0216 11:07:42.235145 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.297050 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.297103 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.297115 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.297134 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.297146 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:42Z","lastTransitionTime":"2026-02-16T11:07:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.331992 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-10 09:39:12.51246429 +0000 UTC Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.400003 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.400042 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.400051 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.400066 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.400074 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:42Z","lastTransitionTime":"2026-02-16T11:07:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.502126 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.502185 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.502201 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.502218 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.502229 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:42Z","lastTransitionTime":"2026-02-16T11:07:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.604451 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.604517 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.604529 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.604547 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.604558 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:42Z","lastTransitionTime":"2026-02-16T11:07:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.624022 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.635263 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.642647 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://317e85af8628e3ae9d104c6faacfd4ce6b66adc7d3f0e426433f60118fffa894\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://43a9246b1c43b2bfb063c1d8aded507f620472bb98ffc99e91115e1189478807\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": 
tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:42Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.658489 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eff651eda112458195ec8e6bc6ecdf362f44a44f7f8be136f631153251f278c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:42Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.687929 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c16c3ec1c264807a71bba82352d13478aabcbae9eee409569b1de207fd89131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6b376f8bceb83918f978008daff9cea37a309102416bb94a209bf286a7fe3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e903800152f2a61249dd315080aec7aff0fb0f96a2d687f4903ee211ac061080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b36100d71aa9c4c492c12006bd25f19aa6f8e8b39048b6e895acbadf3a3d5b5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6614345cd2b9b48d07439200086ab97d851abf34d3fea2b29a5073f224184ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c30142bc1aa2477b0821702607a40da4bd125f14ce8bdd4d6a555481f4be477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://522f14e08f7f310ebb174df27f8bc434d591ff3579ba4c5f1d569497cdef4a11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://522f14e08f7f310ebb174df27f8bc434d591ff3579ba4c5f1d569497cdef4a11\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-16T11:07:37Z\\\",\\\"message\\\":\\\"07:37.036667 6594 default_network_controller.go:776] Recording success event on pod openshift-image-registry/node-ca-kvrsd\\\\nI0216 11:07:37.036668 6594 factory.go:656] Stopping watch factory\\\\nI0216 11:07:37.036649 6594 obj_retry.go:303] Retry object setup: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0216 11:07:37.036677 6594 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0216 11:07:37.036680 6594 ovnkube.go:599] Stopped ovnkube\\\\nI0216 11:07:37.036681 6594 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nI0216 11:07:37.036687 6594 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI0216 11:07:37.036691 6594 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0216 11:07:37.036698 6594 obj_retry.go:420] Function iterateRetryResources for *v1.Pod ended (in 165.675µs)\\\\nI0216 11:07:37.036700 6594 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0216 11:07:37.036709 6594 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0216 11:07:37.036760 6594 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-gfr2q_openshift-ovn-kubernetes(3f545ae8-1b14-4abd-b4ea-844f6ae7b54d)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47947db986c5433367c6b52570a017def2e4babf72f785111587ca86fd745795\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gfr2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:42Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.701753 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:42Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.707607 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.707675 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.707700 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.707732 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.707754 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:42Z","lastTransitionTime":"2026-02-16T11:07:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.721869 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:42Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.739401 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"39ca5ab7-457c-4404-a3eb-f6acce74843b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d3e706e8ad75d6c7419dc41073b0842faf175f1e153735a12e3ee7243676253\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4978ec698681f01249736b54aa5a1fba37fc94f3667b735c99bdfb97d3bd3380\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-26lss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:42Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.763521 4949 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cs472" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75391841-4aa8-4f03-b696-23ac283642c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a92a94b385e42e72f4c48c4c2a8ddba220c077158d0f72e099e562d3b84bf70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bql55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3201fb16e33f256fad3eed7af0783c71ccb51ca75e6734c0745fada820853b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bql55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cs472\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:42Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.776772 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6v4x7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"965b4f20-8786-4c47-8721-c348942551d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktpql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktpql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:27Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6v4x7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:42Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.798243 4949 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/multus-additional-cni-plugins-vjrxd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ff1c788-4b66-48e9-8178-006f231d264c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87041b3d12c1149f97ab42c0edec7c62aaee2e50309ad8c236b1842a6164bf79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\
\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\
\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vjrxd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:42Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.810499 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.810565 4949 kubelet_node_status.go:724] "Recording event message for 
node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.810588 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.810619 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.810644 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:42Z","lastTransitionTime":"2026-02-16T11:07:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.811808 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kvrsd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e1092c7-896f-4334-b157-ac007cdb0d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a5d6a2719dc30352250a07c1028c552b8f73c26d421b869f2e8ddc74fb9dcd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8v7hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:15Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kvrsd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: 
current time 2026-02-16T11:07:42Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.829729 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aada2690-8d5f-4854-bc83-59906010e8ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\
"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4e6265ce5aefd48ee356fb002a61e38aed11e42968db8945ffeaba1d7ee3c9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-16T11:07:11Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0216 11:07:05.819935 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0216 11:07:05.820709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2063297113/tls.crt::/tmp/serving-cert-2063297113/tls.key\\\\\\\"\\\\nI0216 11:07:11.242838 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0216 11:07:11.246739 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0216 11:07:11.246759 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0216 11:07:11.246780 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0216 11:07:11.246785 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0216 11:07:11.257100 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0216 11:07:11.257140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0216 11:07:11.257148 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0216 11:07:11.257148 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0216 11:07:11.257158 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0216 11:07:11.257195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0216 11:07:11.257203 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0216 11:07:11.257208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0216 11:07:11.259242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:42Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.844884 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"abcc0a5c-3dfa-48fe-9df1-4f941d9d811c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afad26039cce493d107df9286cf3268dfc5f76d20b86bb34a36ef7742b8419bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68519f3c9bfc45ad28f92e2cf0c28a9413821784aafd91ab65a311259ed6ecf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9f247fc563e96b2d5c1e866afe8eef00f61520018ad001e6b02cffac286d3ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2296e3c50624ffcfe10eb50ae71f715af1868bfa777c60068d3ef55a2544af00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:42Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.858395 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1997fe9cefd6bfcd634b77dad76d5d7c814318127f2be93b489d7866808c57ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:42Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.871932 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:42Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.884322 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kxn9z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc3f701c-2094-4818-871c-547fc5636a55\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://993e593b3f31361b71cff339c39e1bc66b6fbbeb8a1d125976254ec852dcd919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rld7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kxn9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:42Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.898868 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jsmls" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e42a398-f83a-4463-9ab7-4e093e80d744\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3e0579c66283751490a28d6f93146aa4b7a000219c17747fe2041a778fe2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7q7gk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jsmls\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:42Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.912598 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.912641 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.912651 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.912666 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.912676 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:42Z","lastTransitionTime":"2026-02-16T11:07:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:42 crc kubenswrapper[4949]: I0216 11:07:42.982373 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/965b4f20-8786-4c47-8721-c348942551d6-metrics-certs\") pod \"network-metrics-daemon-6v4x7\" (UID: \"965b4f20-8786-4c47-8721-c348942551d6\") " pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:07:42 crc kubenswrapper[4949]: E0216 11:07:42.982493 4949 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Feb 16 11:07:42 crc kubenswrapper[4949]: E0216 11:07:42.982554 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/965b4f20-8786-4c47-8721-c348942551d6-metrics-certs podName:965b4f20-8786-4c47-8721-c348942551d6 nodeName:}" failed. No retries permitted until 2026-02-16 11:07:58.982541598 +0000 UTC m=+68.611875763 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/965b4f20-8786-4c47-8721-c348942551d6-metrics-certs") pod "network-metrics-daemon-6v4x7" (UID: "965b4f20-8786-4c47-8721-c348942551d6") : object "openshift-multus"/"metrics-daemon-secret" not registered Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.015109 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.015141 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.015150 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.015163 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.015201 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:43Z","lastTransitionTime":"2026-02-16T11:07:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.118058 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.118094 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.118101 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.118114 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.118123 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:43Z","lastTransitionTime":"2026-02-16T11:07:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.221529 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.221614 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.221651 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.221681 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.221702 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:43Z","lastTransitionTime":"2026-02-16T11:07:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.234371 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:07:43 crc kubenswrapper[4949]: E0216 11:07:43.234589 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6" Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.325051 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.325140 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.325165 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.325250 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.325274 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:43Z","lastTransitionTime":"2026-02-16T11:07:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.332481 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-10 07:38:54.519323455 +0000 UTC Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.427535 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.427616 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.427653 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.427689 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.427715 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:43Z","lastTransitionTime":"2026-02-16T11:07:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.530043 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.530131 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.530165 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.530244 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.530268 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:43Z","lastTransitionTime":"2026-02-16T11:07:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.632821 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.632995 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.633028 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.633057 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.633079 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:43Z","lastTransitionTime":"2026-02-16T11:07:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.735199 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.735231 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.735241 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.735256 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.735267 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:43Z","lastTransitionTime":"2026-02-16T11:07:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.837657 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.837713 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.837725 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.837772 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.837787 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:43Z","lastTransitionTime":"2026-02-16T11:07:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.940673 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.940715 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.940729 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.940747 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:43 crc kubenswrapper[4949]: I0216 11:07:43.940759 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:43Z","lastTransitionTime":"2026-02-16T11:07:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.043013 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.043132 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.043274 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.043372 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.043409 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:44Z","lastTransitionTime":"2026-02-16T11:07:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.095705 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.095870 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:07:44 crc kubenswrapper[4949]: E0216 11:07:44.095895 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-02-16 11:08:16.095860943 +0000 UTC m=+85.725195168 (durationBeforeRetry 32s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:07:44 crc kubenswrapper[4949]: E0216 11:07:44.096019 4949 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 16 11:07:44 crc kubenswrapper[4949]: E0216 11:07:44.096056 4949 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 16 11:07:44 crc kubenswrapper[4949]: E0216 11:07:44.096075 4949 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 16 11:07:44 crc kubenswrapper[4949]: E0216 11:07:44.096110 4949 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Feb 16 11:07:44 crc kubenswrapper[4949]: E0216 11:07:44.096132 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-02-16 11:08:16.09611418 +0000 UTC m=+85.725448385 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.095955 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:07:44 crc kubenswrapper[4949]: E0216 11:07:44.096247 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-16 11:08:16.096158682 +0000 UTC m=+85.725492897 (durationBeforeRetry 32s). 
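Annotation: the UnmountVolume.TearDown failure above ("driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers") is a registry lookup miss: after the kubelet restart, the CSI plugin has not yet re-registered over the plugin registration socket, so lookups by driver name fail until it does. A minimal sketch of that lookup pattern follows; the types and methods are hypothetical stand-ins, not the kubelet's actual registration API.

    package main

    import (
        "fmt"
        "sync"
    )

    // driverRegistry is a hypothetical stand-in for the kubelet's set of
    // registered CSI drivers.
    type driverRegistry struct {
        mu      sync.RWMutex
        drivers map[string]struct{}
    }

    func (r *driverRegistry) Register(name string) {
        r.mu.Lock()
        defer r.mu.Unlock()
        r.drivers[name] = struct{}{}
    }

    // Client mirrors the error text seen in the log when the driver
    // has not registered yet.
    func (r *driverRegistry) Client(name string) error {
        r.mu.RLock()
        defer r.mu.RUnlock()
        if _, ok := r.drivers[name]; !ok {
            return fmt.Errorf("driver name %s not found in the list of registered CSI drivers", name)
        }
        return nil
    }

    func main() {
        reg := &driverRegistry{drivers: map[string]struct{}{}}
        fmt.Println(reg.Client("kubevirt.io.hostpath-provisioner")) // error until registration
        reg.Register("kubevirt.io.hostpath-provisioner")
        fmt.Println(reg.Client("kubevirt.io.hostpath-provisioner")) // nil once registered
    }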
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.096365 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.096460 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:07:44 crc kubenswrapper[4949]: E0216 11:07:44.096494 4949 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 16 11:07:44 crc kubenswrapper[4949]: E0216 11:07:44.096514 4949 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 16 11:07:44 crc kubenswrapper[4949]: E0216 11:07:44.096523 4949 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 16 11:07:44 crc kubenswrapper[4949]: E0216 11:07:44.096549 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-02-16 11:08:16.096540923 +0000 UTC m=+85.725875088 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 16 11:07:44 crc kubenswrapper[4949]: E0216 11:07:44.096663 4949 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 16 11:07:44 crc kubenswrapper[4949]: E0216 11:07:44.096755 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-16 11:08:16.096731468 +0000 UTC m=+85.726065663 (durationBeforeRetry 32s). 
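Annotation: the retry scheduling in these volume entries doubles on each failure: the metrics-certs mount at 11:07:42 was deferred with durationBeforeRetry 16s, and by 11:07:44 the same class of operations is deferred 32s. The sketch below shows that doubling-with-cap pattern; the initial delay and the cap are illustrative assumptions, not the kubelet's exact values.

    package main

    import (
        "fmt"
        "time"
    )

    // nextBackoff doubles the previous delay up to a limit, the pattern
    // behind "durationBeforeRetry 16s" followed by "durationBeforeRetry 32s"
    // in the entries above. Initial delay and limit are assumptions.
    func nextBackoff(prev, limit time.Duration) time.Duration {
        if prev <= 0 {
            return 500 * time.Millisecond
        }
        next := 2 * prev
        if next > limit {
            return limit
        }
        return next
    }

    func main() {
        d := time.Duration(0)
        for i := 0; i < 10; i++ {
            d = nextBackoff(d, 2*time.Minute)
            fmt.Println(d) // 500ms, 1s, 2s, ... 16s, 32s, ... capped at 2m
        }
    }

The "object ... not registered" errors being retried here (kube-root-ca.crt, openshift-service-ca.crt, metrics-daemon-secret) come from the kubelet's object cache not yet having those ConfigMaps and Secrets after the restart, so the backoff keeps deferring the mounts until the cache is populated.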
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.146677 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.146722 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.146731 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.146744 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.146754 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:44Z","lastTransitionTime":"2026-02-16T11:07:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.234411 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.234464 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:07:44 crc kubenswrapper[4949]: E0216 11:07:44.234523 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.234411 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:07:44 crc kubenswrapper[4949]: E0216 11:07:44.234589 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:07:44 crc kubenswrapper[4949]: E0216 11:07:44.234696 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.248839 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.248943 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.248968 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.249002 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.249027 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:44Z","lastTransitionTime":"2026-02-16T11:07:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.332915 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-29 02:04:32.354416559 +0000 UTC Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.352841 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.352915 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.352941 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.352971 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.352997 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:44Z","lastTransitionTime":"2026-02-16T11:07:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.455964 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.456055 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.456087 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.456118 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.456139 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:44Z","lastTransitionTime":"2026-02-16T11:07:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.559454 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.559799 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.559818 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.559841 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.559858 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:44Z","lastTransitionTime":"2026-02-16T11:07:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.662928 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.662996 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.663013 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.663037 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.663054 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:44Z","lastTransitionTime":"2026-02-16T11:07:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.765902 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.765989 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.766017 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.766049 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.766073 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:44Z","lastTransitionTime":"2026-02-16T11:07:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.869281 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.869323 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.869335 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.869352 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.869365 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:44Z","lastTransitionTime":"2026-02-16T11:07:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.972739 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.972803 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.972823 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.972847 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:44 crc kubenswrapper[4949]: I0216 11:07:44.972865 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:44Z","lastTransitionTime":"2026-02-16T11:07:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:45 crc kubenswrapper[4949]: I0216 11:07:45.076709 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:45 crc kubenswrapper[4949]: I0216 11:07:45.076774 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:45 crc kubenswrapper[4949]: I0216 11:07:45.076791 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:45 crc kubenswrapper[4949]: I0216 11:07:45.076819 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:45 crc kubenswrapper[4949]: I0216 11:07:45.076836 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:45Z","lastTransitionTime":"2026-02-16T11:07:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:45 crc kubenswrapper[4949]: I0216 11:07:45.180050 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:45 crc kubenswrapper[4949]: I0216 11:07:45.180121 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:45 crc kubenswrapper[4949]: I0216 11:07:45.180137 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:45 crc kubenswrapper[4949]: I0216 11:07:45.180159 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:45 crc kubenswrapper[4949]: I0216 11:07:45.180194 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:45Z","lastTransitionTime":"2026-02-16T11:07:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:45 crc kubenswrapper[4949]: I0216 11:07:45.234723 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:07:45 crc kubenswrapper[4949]: E0216 11:07:45.234938 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6" Feb 16 11:07:45 crc kubenswrapper[4949]: I0216 11:07:45.283089 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:45 crc kubenswrapper[4949]: I0216 11:07:45.283131 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:45 crc kubenswrapper[4949]: I0216 11:07:45.283141 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:45 crc kubenswrapper[4949]: I0216 11:07:45.283158 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:45 crc kubenswrapper[4949]: I0216 11:07:45.283187 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:45Z","lastTransitionTime":"2026-02-16T11:07:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:45 crc kubenswrapper[4949]: I0216 11:07:45.333300 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-01 09:59:05.12868878 +0000 UTC Feb 16 11:07:45 crc kubenswrapper[4949]: I0216 11:07:45.386409 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:45 crc kubenswrapper[4949]: I0216 11:07:45.386457 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:45 crc kubenswrapper[4949]: I0216 11:07:45.386468 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:45 crc kubenswrapper[4949]: I0216 11:07:45.386483 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:45 crc kubenswrapper[4949]: I0216 11:07:45.386494 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:45Z","lastTransitionTime":"2026-02-16T11:07:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:45 crc kubenswrapper[4949]: I0216 11:07:45.489370 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:45 crc kubenswrapper[4949]: I0216 11:07:45.489414 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:45 crc kubenswrapper[4949]: I0216 11:07:45.489424 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:45 crc kubenswrapper[4949]: I0216 11:07:45.489441 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:45 crc kubenswrapper[4949]: I0216 11:07:45.489452 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:45Z","lastTransitionTime":"2026-02-16T11:07:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:45 crc kubenswrapper[4949]: I0216 11:07:45.591972 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:45 crc kubenswrapper[4949]: I0216 11:07:45.592003 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:45 crc kubenswrapper[4949]: I0216 11:07:45.592011 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:45 crc kubenswrapper[4949]: I0216 11:07:45.592022 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:45 crc kubenswrapper[4949]: I0216 11:07:45.592030 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:45Z","lastTransitionTime":"2026-02-16T11:07:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:45 crc kubenswrapper[4949]: I0216 11:07:45.694802 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:45 crc kubenswrapper[4949]: I0216 11:07:45.694864 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:45 crc kubenswrapper[4949]: I0216 11:07:45.694885 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:45 crc kubenswrapper[4949]: I0216 11:07:45.694913 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:45 crc kubenswrapper[4949]: I0216 11:07:45.694934 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:45Z","lastTransitionTime":"2026-02-16T11:07:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:45 crc kubenswrapper[4949]: I0216 11:07:45.798236 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:45 crc kubenswrapper[4949]: I0216 11:07:45.798277 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:45 crc kubenswrapper[4949]: I0216 11:07:45.798289 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:45 crc kubenswrapper[4949]: I0216 11:07:45.798306 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:45 crc kubenswrapper[4949]: I0216 11:07:45.798316 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:45Z","lastTransitionTime":"2026-02-16T11:07:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:45 crc kubenswrapper[4949]: I0216 11:07:45.900757 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:45 crc kubenswrapper[4949]: I0216 11:07:45.900791 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:45 crc kubenswrapper[4949]: I0216 11:07:45.900820 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:45 crc kubenswrapper[4949]: I0216 11:07:45.900836 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:45 crc kubenswrapper[4949]: I0216 11:07:45.900847 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:45Z","lastTransitionTime":"2026-02-16T11:07:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.003457 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.003509 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.003522 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.003538 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.003550 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:46Z","lastTransitionTime":"2026-02-16T11:07:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.106103 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.106201 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.106217 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.106237 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.106251 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:46Z","lastTransitionTime":"2026-02-16T11:07:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.209777 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.209822 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.209837 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.209856 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.209872 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:46Z","lastTransitionTime":"2026-02-16T11:07:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.235002 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.235037 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:07:46 crc kubenswrapper[4949]: E0216 11:07:46.235147 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.235454 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:07:46 crc kubenswrapper[4949]: E0216 11:07:46.235643 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:07:46 crc kubenswrapper[4949]: E0216 11:07:46.235713 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.312284 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.312334 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.312350 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.312367 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.312379 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:46Z","lastTransitionTime":"2026-02-16T11:07:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.334116 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-24 19:23:36.268638539 +0000 UTC Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.415027 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.415057 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.415069 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.415085 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.415094 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:46Z","lastTransitionTime":"2026-02-16T11:07:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.519039 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.519110 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.519123 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.519139 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.519150 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:46Z","lastTransitionTime":"2026-02-16T11:07:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.622447 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.622502 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.622520 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.622544 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.622560 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:46Z","lastTransitionTime":"2026-02-16T11:07:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.725637 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.725762 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.725793 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.725822 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.725839 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:46Z","lastTransitionTime":"2026-02-16T11:07:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.828509 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.828631 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.828654 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.828683 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.828705 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:46Z","lastTransitionTime":"2026-02-16T11:07:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.931112 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.931164 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.931205 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.931224 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:46 crc kubenswrapper[4949]: I0216 11:07:46.931237 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:46Z","lastTransitionTime":"2026-02-16T11:07:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.033721 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.033783 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.033803 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.033831 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.033854 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:47Z","lastTransitionTime":"2026-02-16T11:07:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.136987 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.137050 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.137068 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.137093 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.137111 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:47Z","lastTransitionTime":"2026-02-16T11:07:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.234427 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:07:47 crc kubenswrapper[4949]: E0216 11:07:47.234613 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6" Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.245857 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.245886 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.245895 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.245907 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.245916 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:47Z","lastTransitionTime":"2026-02-16T11:07:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.335086 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-18 00:14:11.971624062 +0000 UTC Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.348555 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.348605 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.348616 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.348634 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.348646 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:47Z","lastTransitionTime":"2026-02-16T11:07:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.450805 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.450866 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.450884 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.450911 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.450928 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:47Z","lastTransitionTime":"2026-02-16T11:07:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.553148 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.553213 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.553314 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.553334 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.553344 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:47Z","lastTransitionTime":"2026-02-16T11:07:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.656237 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.656300 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.656310 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.656330 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.656343 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:47Z","lastTransitionTime":"2026-02-16T11:07:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.759677 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.759743 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.759765 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.759797 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.759821 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:47Z","lastTransitionTime":"2026-02-16T11:07:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.862863 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.862925 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.862934 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.862955 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.862969 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:47Z","lastTransitionTime":"2026-02-16T11:07:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.965154 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.965247 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.965263 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.965286 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:47 crc kubenswrapper[4949]: I0216 11:07:47.965303 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:47Z","lastTransitionTime":"2026-02-16T11:07:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.067518 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.067578 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.067599 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.067628 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.067651 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:48Z","lastTransitionTime":"2026-02-16T11:07:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.169645 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.169684 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.169693 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.169707 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.169718 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:48Z","lastTransitionTime":"2026-02-16T11:07:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.234608 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.234703 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:07:48 crc kubenswrapper[4949]: E0216 11:07:48.234803 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.234715 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:07:48 crc kubenswrapper[4949]: E0216 11:07:48.234926 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:07:48 crc kubenswrapper[4949]: E0216 11:07:48.235025 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.269812 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.269878 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.269889 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.269909 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.269920 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:48Z","lastTransitionTime":"2026-02-16T11:07:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:48 crc kubenswrapper[4949]: E0216 11:07:48.282868 4949 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc77a723-71f9-4f4a-b80e-2feb50c63f04\\\",\\\"systemUUID\\\":\\\"fcf7eef6-e236-4c8e-bd9c-41b70a7621ed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:48Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.287694 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.287734 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.287742 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.287756 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.287765 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:48Z","lastTransitionTime":"2026-02-16T11:07:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:48 crc kubenswrapper[4949]: E0216 11:07:48.307368 4949 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc77a723-71f9-4f4a-b80e-2feb50c63f04\\\",\\\"systemUUID\\\":\\\"fcf7eef6-e236-4c8e-bd9c-41b70a7621ed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:48Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.311439 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.311563 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.311582 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.311606 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.311623 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:48Z","lastTransitionTime":"2026-02-16T11:07:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:48 crc kubenswrapper[4949]: E0216 11:07:48.330696 4949 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc77a723-71f9-4f4a-b80e-2feb50c63f04\\\",\\\"systemUUID\\\":\\\"fcf7eef6-e236-4c8e-bd9c-41b70a7621ed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:48Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.335316 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.335400 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 
05:53:03 +0000 UTC, rotation deadline is 2025-11-19 22:48:29.018700986 +0000 UTC Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.335410 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.335439 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.335456 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.335467 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:48Z","lastTransitionTime":"2026-02-16T11:07:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:48 crc kubenswrapper[4949]: E0216 11:07:48.351379 4949 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc77a723-71f9-4f4a-b80e-2feb50c63f04\\\",\\\"systemUUID\\\":\\\"fcf7eef6-e236-4c8e-bd9c-41b70a7621ed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:48Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.354984 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.355041 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.355059 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.355081 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.355099 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:48Z","lastTransitionTime":"2026-02-16T11:07:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:48 crc kubenswrapper[4949]: E0216 11:07:48.371084 4949 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc77a723-71f9-4f4a-b80e-2feb50c63f04\\\",\\\"systemUUID\\\":\\\"fcf7eef6-e236-4c8e-bd9c-41b70a7621ed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:48Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:48 crc kubenswrapper[4949]: E0216 11:07:48.371265 4949 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.372947 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.373013 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.373027 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.373046 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.373060 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:48Z","lastTransitionTime":"2026-02-16T11:07:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.475756 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.475815 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.475827 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.475857 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.475873 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:48Z","lastTransitionTime":"2026-02-16T11:07:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.531223 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.551872 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ff1c788-4b66-48e9-8178-006f231d264c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87041b3d12c1149f97ab42c0edec7c62aaee2e50309ad8c236b1842a6164bf79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-
api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:19Z\\\"
,\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vjrxd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-02-16T11:07:48Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.570824 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kvrsd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e1092c7-896f-4334-b157-ac007cdb0d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a5d6a2719dc30352250a07c1028c552b8f73c26d421b869f2e8ddc74fb9dcd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8v7hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:15Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kvrsd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:48Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.578957 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.579014 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.579025 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.579046 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.579059 4949 setters.go:603] "Node 
became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:48Z","lastTransitionTime":"2026-02-16T11:07:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.588634 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aada2690-8d5f-4854-bc83-59906010e8ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e635
5e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4e6265ce5aefd48ee356fb002a61e38aed11e42968db8945ffeaba1d7ee3c9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-16T11:07:11Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0216 11:07:05.819935 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0216 11:07:05.820709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2063297113/tls.crt::/tmp/serving-cert-2063297113/tls.key\\\\\\\"\\\\nI0216 11:07:11.242838 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0216 11:07:11.246739 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0216 11:07:11.246759 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0216 11:07:11.246780 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0216 11:07:11.246785 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0216 11:07:11.257100 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0216 11:07:11.257140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0216 11:07:11.257148 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0216 11:07:11.257148 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0216 11:07:11.257158 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0216 11:07:11.257195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0216 11:07:11.257203 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0216 11:07:11.257208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0216 11:07:11.259242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:48Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.602130 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"abcc0a5c-3dfa-48fe-9df1-4f941d9d811c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afad26039cce493d107df9286cf3268dfc5f76d20b86bb34a36ef7742b8419bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68519f3c9bfc45ad28f92e2cf0c28a9413821784aafd91ab65a311259ed6ecf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9f247fc563e96b2d5c1e866afe8eef00f61520018ad001e6b02cffac286d3ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2296e3c50624ffcfe10eb50ae71f715af1868bfa777c60068d3ef55a2544af00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:48Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.619086 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1997fe9cefd6bfcd634b77dad76d5d7c814318127f2be93b489d7866808c57ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:48Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.636153 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:48Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.652335 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kxn9z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc3f701c-2094-4818-871c-547fc5636a55\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://993e593b3f31361b71cff339c39e1bc66b6fbbeb8a1d125976254ec852dcd919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rld7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kxn9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:48Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.665064 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jsmls" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e42a398-f83a-4463-9ab7-4e093e80d744\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3e0579c66283751490a28d6f93146aa4b7a000219c17747fe2041a778fe2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7q7gk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jsmls\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:48Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.681395 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.681472 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.681483 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.681944 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.681970 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:48Z","lastTransitionTime":"2026-02-16T11:07:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.685925 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5554e66b-3309-4de5-b983-47e39d83fd75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b0f44fbd5ac8ebc2187917178b0999e0eb7837a1c097691c9667669486f5a01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e29b88847d04a477cf98987a7df615c6de0a6d9749cf7bdac9569f10360fc5e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd7
89a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb35c1de6926d4a26d187408bff5edd0286e53989f9b76e8c589428b397f9f13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f2ffd2fb061548169d3abaf8402343cafbc3e5ae03a850efdbd2d58416044185\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f2ffd2fb061548169d3abaf8402343cafbc3e5ae03a850efdbd2d58416044185\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:48Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.700580 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://317e85af8628e3ae9d104c6faacfd4ce6b66adc7d3f0e426433f60118fffa894\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://43a9246b1c43b2bfb063c1d8aded507f620472bb98ffc99e91115e1189478807\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:48Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.712680 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eff651eda112458195ec8e6bc6ecdf362f44a44f7f8be136f631153251f278c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:48Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.733269 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c16c3ec1c264807a71bba82352d13478aabcbae9eee409569b1de207fd89131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6b376f8bceb83918f978008daff9cea37a309102416bb94a209bf286a7fe3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e903800152f2a61249dd315080aec7aff0fb0f96a2d687f4903ee211ac061080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b36100d71aa9c4c492c12006bd25f19aa6f8e8b39048b6e895acbadf3a3d5b5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6614345cd2b9b48d07439200086ab97d851abf34d3fea2b29a5073f224184ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c30142bc1aa2477b0821702607a40da4bd125f14ce8bdd4d6a555481f4be477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://522f14e08f7f310ebb174df27f8bc434d591ff35
79ba4c5f1d569497cdef4a11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://522f14e08f7f310ebb174df27f8bc434d591ff3579ba4c5f1d569497cdef4a11\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-16T11:07:37Z\\\",\\\"message\\\":\\\"07:37.036667 6594 default_network_controller.go:776] Recording success event on pod openshift-image-registry/node-ca-kvrsd\\\\nI0216 11:07:37.036668 6594 factory.go:656] Stopping watch factory\\\\nI0216 11:07:37.036649 6594 obj_retry.go:303] Retry object setup: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0216 11:07:37.036677 6594 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0216 11:07:37.036680 6594 ovnkube.go:599] Stopped ovnkube\\\\nI0216 11:07:37.036681 6594 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nI0216 11:07:37.036687 6594 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI0216 11:07:37.036691 6594 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0216 11:07:37.036698 6594 obj_retry.go:420] Function iterateRetryResources for *v1.Pod ended (in 165.675µs)\\\\nI0216 11:07:37.036700 6594 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0216 11:07:37.036709 6594 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0216 11:07:37.036760 6594 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-gfr2q_openshift-ovn-kubernetes(3f545ae8-1b14-4abd-b4ea-844f6ae7b54d)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47947db986c5433367c6b52570a017def2e4babf72f785111587ca86fd745795\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gfr2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:48Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.744694 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:48Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.759368 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:48Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.770538 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"39ca5ab7-457c-4404-a3eb-f6acce74843b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d3e706e8ad75d6c7419dc41073b0842faf175f1e153735a12e3ee7243676253\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4978ec698681f01249736b54aa5a1fba37fc94f3667b735c99bdfb97d3bd3380\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-26lss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:48Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.781734 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cs472" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75391841-4aa8-4f03-b696-23ac283642c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a92a94b385e42e72f4c48c4c2a8ddba220c077158d0f72e099e562d3b84bf70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bql55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3201fb16e33f256fad3eed7af0783c71ccb51ca75e6734c0745fada820853b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d77325
7453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bql55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cs472\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:48Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.784336 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.784379 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.784396 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.784422 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.784439 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:48Z","lastTransitionTime":"2026-02-16T11:07:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.791737 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6v4x7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"965b4f20-8786-4c47-8721-c348942551d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktpql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktpql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:27Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6v4x7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:48Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.887220 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.887246 4949 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.887254 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.887266 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.887275 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:48Z","lastTransitionTime":"2026-02-16T11:07:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.990096 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.990231 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.990261 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.990293 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:48 crc kubenswrapper[4949]: I0216 11:07:48.990317 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:48Z","lastTransitionTime":"2026-02-16T11:07:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:49 crc kubenswrapper[4949]: I0216 11:07:49.093162 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:49 crc kubenswrapper[4949]: I0216 11:07:49.093250 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:49 crc kubenswrapper[4949]: I0216 11:07:49.093268 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:49 crc kubenswrapper[4949]: I0216 11:07:49.093294 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:49 crc kubenswrapper[4949]: I0216 11:07:49.093311 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:49Z","lastTransitionTime":"2026-02-16T11:07:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:49 crc kubenswrapper[4949]: I0216 11:07:49.196026 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:49 crc kubenswrapper[4949]: I0216 11:07:49.196100 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:49 crc kubenswrapper[4949]: I0216 11:07:49.196117 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:49 crc kubenswrapper[4949]: I0216 11:07:49.196141 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:49 crc kubenswrapper[4949]: I0216 11:07:49.196158 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:49Z","lastTransitionTime":"2026-02-16T11:07:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:49 crc kubenswrapper[4949]: I0216 11:07:49.234637 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:07:49 crc kubenswrapper[4949]: E0216 11:07:49.234865 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6" Feb 16 11:07:49 crc kubenswrapper[4949]: I0216 11:07:49.298571 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:49 crc kubenswrapper[4949]: I0216 11:07:49.298655 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:49 crc kubenswrapper[4949]: I0216 11:07:49.298671 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:49 crc kubenswrapper[4949]: I0216 11:07:49.298689 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:49 crc kubenswrapper[4949]: I0216 11:07:49.298704 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:49Z","lastTransitionTime":"2026-02-16T11:07:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:49 crc kubenswrapper[4949]: I0216 11:07:49.336085 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-11 23:23:56.480547586 +0000 UTC Feb 16 11:07:49 crc kubenswrapper[4949]: I0216 11:07:49.401603 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:49 crc kubenswrapper[4949]: I0216 11:07:49.401666 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:49 crc kubenswrapper[4949]: I0216 11:07:49.401676 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:49 crc kubenswrapper[4949]: I0216 11:07:49.401691 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:49 crc kubenswrapper[4949]: I0216 11:07:49.401701 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:49Z","lastTransitionTime":"2026-02-16T11:07:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:49 crc kubenswrapper[4949]: I0216 11:07:49.504742 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:49 crc kubenswrapper[4949]: I0216 11:07:49.504808 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:49 crc kubenswrapper[4949]: I0216 11:07:49.504824 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:49 crc kubenswrapper[4949]: I0216 11:07:49.504848 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:49 crc kubenswrapper[4949]: I0216 11:07:49.504865 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:49Z","lastTransitionTime":"2026-02-16T11:07:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:49 crc kubenswrapper[4949]: I0216 11:07:49.607428 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:49 crc kubenswrapper[4949]: I0216 11:07:49.607487 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:49 crc kubenswrapper[4949]: I0216 11:07:49.607504 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:49 crc kubenswrapper[4949]: I0216 11:07:49.607523 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:49 crc kubenswrapper[4949]: I0216 11:07:49.607535 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:49Z","lastTransitionTime":"2026-02-16T11:07:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:49 crc kubenswrapper[4949]: I0216 11:07:49.709083 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:49 crc kubenswrapper[4949]: I0216 11:07:49.709114 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:49 crc kubenswrapper[4949]: I0216 11:07:49.709122 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:49 crc kubenswrapper[4949]: I0216 11:07:49.709139 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:49 crc kubenswrapper[4949]: I0216 11:07:49.709148 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:49Z","lastTransitionTime":"2026-02-16T11:07:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:49 crc kubenswrapper[4949]: I0216 11:07:49.812088 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:49 crc kubenswrapper[4949]: I0216 11:07:49.812143 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:49 crc kubenswrapper[4949]: I0216 11:07:49.812154 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:49 crc kubenswrapper[4949]: I0216 11:07:49.812186 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:49 crc kubenswrapper[4949]: I0216 11:07:49.812201 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:49Z","lastTransitionTime":"2026-02-16T11:07:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:49 crc kubenswrapper[4949]: I0216 11:07:49.915076 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:49 crc kubenswrapper[4949]: I0216 11:07:49.915428 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:49 crc kubenswrapper[4949]: I0216 11:07:49.915474 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:49 crc kubenswrapper[4949]: I0216 11:07:49.915507 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:49 crc kubenswrapper[4949]: I0216 11:07:49.915535 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:49Z","lastTransitionTime":"2026-02-16T11:07:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.018609 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.018667 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.018684 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.018708 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.019070 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:50Z","lastTransitionTime":"2026-02-16T11:07:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.122651 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.122713 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.122731 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.122756 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.122773 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:50Z","lastTransitionTime":"2026-02-16T11:07:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.225536 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.225607 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.225624 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.225651 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.225668 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:50Z","lastTransitionTime":"2026-02-16T11:07:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.234314 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.234381 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.234355 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:07:50 crc kubenswrapper[4949]: E0216 11:07:50.234560 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:07:50 crc kubenswrapper[4949]: E0216 11:07:50.234702 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:07:50 crc kubenswrapper[4949]: E0216 11:07:50.234997 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.328415 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.328470 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.328486 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.328508 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.328526 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:50Z","lastTransitionTime":"2026-02-16T11:07:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.337605 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-22 03:13:23.788713865 +0000 UTC Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.431367 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.431405 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.431413 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.431427 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.431437 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:50Z","lastTransitionTime":"2026-02-16T11:07:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.533935 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.533975 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.533984 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.533998 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.534008 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:50Z","lastTransitionTime":"2026-02-16T11:07:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.636857 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.636890 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.636899 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.636911 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.636920 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:50Z","lastTransitionTime":"2026-02-16T11:07:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.739976 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.740026 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.740056 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.740071 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.740083 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:50Z","lastTransitionTime":"2026-02-16T11:07:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.843332 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.843383 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.843401 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.843425 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.843442 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:50Z","lastTransitionTime":"2026-02-16T11:07:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.946217 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.946346 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.946367 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.946393 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:50 crc kubenswrapper[4949]: I0216 11:07:50.946415 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:50Z","lastTransitionTime":"2026-02-16T11:07:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.049533 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.049567 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.049578 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.049595 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.049607 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:51Z","lastTransitionTime":"2026-02-16T11:07:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.152063 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.152150 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.152194 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.152221 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.152240 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:51Z","lastTransitionTime":"2026-02-16T11:07:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.234247 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:07:51 crc kubenswrapper[4949]: E0216 11:07:51.234771 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.236502 4949 scope.go:117] "RemoveContainer" containerID="522f14e08f7f310ebb174df27f8bc434d591ff3579ba4c5f1d569497cdef4a11" Feb 16 11:07:51 crc kubenswrapper[4949]: E0216 11:07:51.237257 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-gfr2q_openshift-ovn-kubernetes(3f545ae8-1b14-4abd-b4ea-844f6ae7b54d)\"" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.246784 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kvrsd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e1092c7-896f-4334-b157-ac007cdb0d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a5d6a2719dc30352250a07c1028c552b8f73c26d421b869f2e8ddc74fb9dcd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8v7hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:15Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kvrsd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:51Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.254109 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.254162 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.254211 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.254233 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.254248 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:51Z","lastTransitionTime":"2026-02-16T11:07:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.264486 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ff1c788-4b66-48e9-8178-006f231d264c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87041b3d12c1149f97ab42c0edec7c62aaee2e50309ad8c236b1842a6164bf79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\
\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"starte
dAt\\\":\\\"2026-02-16T11:07:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vjrxd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:51Z is after 
2025-08-24T17:21:41Z" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.280188 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aada2690-8d5f-4854-bc83-59906010e8ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static
-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4e6265ce5aefd48ee356fb002a61e38aed11e42968db8945ffeaba1d7ee3c9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-16T11:07:11Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0216 11:07:05.819935 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0216 11:07:05.820709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2063297113/tls.crt::/tmp/serving-cert-2063297113/tls.key\\\\\\\"\\\\nI0216 11:07:11.242838 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0216 11:07:11.246739 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0216 11:07:11.246759 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0216 11:07:11.246780 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0216 11:07:11.246785 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0216 11:07:11.257100 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0216 11:07:11.257140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0216 11:07:11.257148 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0216 11:07:11.257148 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0216 11:07:11.257158 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0216 11:07:11.257195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0216 11:07:11.257203 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0216 11:07:11.257208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0216 11:07:11.259242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:51Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.292327 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"abcc0a5c-3dfa-48fe-9df1-4f941d9d811c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afad26039cce493d107df9286cf3268dfc5f76d20b86bb34a36ef7742b8419bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68519f3c9bfc45ad28f92e2cf0c28a9413821784aafd91ab65a311259ed6ecf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9f247fc563e96b2d5c1e866afe8eef00f61520018ad001e6b02cffac286d3ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2296e3c50624ffcfe10eb50ae71f715af1868bfa777c60068d3ef55a2544af00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:51Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.306059 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1997fe9cefd6bfcd634b77dad76d5d7c814318127f2be93b489d7866808c57ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:51Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.319333 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:51Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.331283 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kxn9z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc3f701c-2094-4818-871c-547fc5636a55\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://993e593b3f31361b71cff339c39e1bc66b6fbbeb8a1d125976254ec852dcd919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rld7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kxn9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:51Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.337842 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-22 05:43:13.672418451 +0000 UTC Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.345359 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jsmls" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e42a398-f83a-4463-9ab7-4e093e80d744\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3e0579c66283751490a28d6f93146aa4b7a000219c17747fe2041a778fe2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7q7gk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jsmls\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:51Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.356249 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.356275 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.356285 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.356300 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.356309 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:51Z","lastTransitionTime":"2026-02-16T11:07:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.358383 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5554e66b-3309-4de5-b983-47e39d83fd75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b0f44fbd5ac8ebc2187917178b0999e0eb7837a1c097691c9667669486f5a01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e29b88847d04a477cf98987a7df615c6de0a6d9749cf7bdac9569f10360fc5e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd7
89a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb35c1de6926d4a26d187408bff5edd0286e53989f9b76e8c589428b397f9f13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f2ffd2fb061548169d3abaf8402343cafbc3e5ae03a850efdbd2d58416044185\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f2ffd2fb061548169d3abaf8402343cafbc3e5ae03a850efdbd2d58416044185\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:51Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.369718 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://317e85af8628e3ae9d104c6faacfd4ce6b66adc7d3f0e426433f60118fffa894\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://43a9246b1c43b2bfb063c1d8aded507f620472bb98ffc99e91115e1189478807\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:51Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.383463 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eff651eda112458195ec8e6bc6ecdf362f44a44f7f8be136f631153251f278c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:51Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.403472 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c16c3ec1c264807a71bba82352d13478aabcbae9eee409569b1de207fd89131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6b376f8bceb83918f978008daff9cea37a309102416bb94a209bf286a7fe3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e903800152f2a61249dd315080aec7aff0fb0f96a2d687f4903ee211ac061080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b36100d71aa9c4c492c12006bd25f19aa6f8e8b39048b6e895acbadf3a3d5b5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6614345cd2b9b48d07439200086ab97d851abf34d3fea2b29a5073f224184ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c30142bc1aa2477b0821702607a40da4bd125f14ce8bdd4d6a555481f4be477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://522f14e08f7f310ebb174df27f8bc434d591ff35
79ba4c5f1d569497cdef4a11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://522f14e08f7f310ebb174df27f8bc434d591ff3579ba4c5f1d569497cdef4a11\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-16T11:07:37Z\\\",\\\"message\\\":\\\"07:37.036667 6594 default_network_controller.go:776] Recording success event on pod openshift-image-registry/node-ca-kvrsd\\\\nI0216 11:07:37.036668 6594 factory.go:656] Stopping watch factory\\\\nI0216 11:07:37.036649 6594 obj_retry.go:303] Retry object setup: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0216 11:07:37.036677 6594 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0216 11:07:37.036680 6594 ovnkube.go:599] Stopped ovnkube\\\\nI0216 11:07:37.036681 6594 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nI0216 11:07:37.036687 6594 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI0216 11:07:37.036691 6594 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0216 11:07:37.036698 6594 obj_retry.go:420] Function iterateRetryResources for *v1.Pod ended (in 165.675µs)\\\\nI0216 11:07:37.036700 6594 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0216 11:07:37.036709 6594 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0216 11:07:37.036760 6594 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-gfr2q_openshift-ovn-kubernetes(3f545ae8-1b14-4abd-b4ea-844f6ae7b54d)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47947db986c5433367c6b52570a017def2e4babf72f785111587ca86fd745795\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gfr2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:51Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.415341 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:51Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.432087 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"39ca5ab7-457c-4404-a3eb-f6acce74843b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d3e706e8ad75d6c7419dc41073b0842faf175f1e153735a12e3ee7243676253\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4978ec698681f01249736b54aa5a1fba37fc94f3667b735c99bdfb97d3bd3380\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-26lss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:51Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.446514 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cs472" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75391841-4aa8-4f03-b696-23ac283642c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a92a94b385e42e72f4c48c4c2a8ddba220c077158d0f72e099e562d3b84bf70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bql55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3201fb16e33f256fad3eed7af0783c71ccb51ca75e6734c0745fada820853b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d77325
7453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bql55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cs472\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:51Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.460723 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6v4x7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"965b4f20-8786-4c47-8721-c348942551d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktpql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktpql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:27Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6v4x7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:51Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.461233 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.461288 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.461305 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.461324 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.461338 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:51Z","lastTransitionTime":"2026-02-16T11:07:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.473247 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:51Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.564399 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.564472 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.564495 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.564525 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.564546 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:51Z","lastTransitionTime":"2026-02-16T11:07:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration 
file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.667598 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.667677 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.667703 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.667734 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.667758 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:51Z","lastTransitionTime":"2026-02-16T11:07:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.771273 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.771856 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.771873 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.771905 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.771930 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:51Z","lastTransitionTime":"2026-02-16T11:07:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.874765 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.874812 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.874822 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.874837 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.874848 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:51Z","lastTransitionTime":"2026-02-16T11:07:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.978237 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.978308 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.978320 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.978346 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:51 crc kubenswrapper[4949]: I0216 11:07:51.978421 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:51Z","lastTransitionTime":"2026-02-16T11:07:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:52 crc kubenswrapper[4949]: I0216 11:07:52.086455 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:52 crc kubenswrapper[4949]: I0216 11:07:52.086509 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:52 crc kubenswrapper[4949]: I0216 11:07:52.086521 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:52 crc kubenswrapper[4949]: I0216 11:07:52.086541 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:52 crc kubenswrapper[4949]: I0216 11:07:52.086553 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:52Z","lastTransitionTime":"2026-02-16T11:07:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:52 crc kubenswrapper[4949]: I0216 11:07:52.188940 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:52 crc kubenswrapper[4949]: I0216 11:07:52.189029 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:52 crc kubenswrapper[4949]: I0216 11:07:52.189046 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:52 crc kubenswrapper[4949]: I0216 11:07:52.189070 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:52 crc kubenswrapper[4949]: I0216 11:07:52.189087 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:52Z","lastTransitionTime":"2026-02-16T11:07:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:52 crc kubenswrapper[4949]: I0216 11:07:52.234639 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:07:52 crc kubenswrapper[4949]: I0216 11:07:52.234741 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:07:52 crc kubenswrapper[4949]: E0216 11:07:52.234835 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:07:52 crc kubenswrapper[4949]: I0216 11:07:52.234847 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:07:52 crc kubenswrapper[4949]: E0216 11:07:52.234988 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:07:52 crc kubenswrapper[4949]: E0216 11:07:52.235261 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:07:52 crc kubenswrapper[4949]: I0216 11:07:52.291893 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:52 crc kubenswrapper[4949]: I0216 11:07:52.291934 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:52 crc kubenswrapper[4949]: I0216 11:07:52.291944 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:52 crc kubenswrapper[4949]: I0216 11:07:52.291957 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:52 crc kubenswrapper[4949]: I0216 11:07:52.291967 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:52Z","lastTransitionTime":"2026-02-16T11:07:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:52 crc kubenswrapper[4949]: I0216 11:07:52.338909 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-06 05:10:40.968425828 +0000 UTC Feb 16 11:07:52 crc kubenswrapper[4949]: I0216 11:07:52.394159 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:52 crc kubenswrapper[4949]: I0216 11:07:52.394247 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:52 crc kubenswrapper[4949]: I0216 11:07:52.394265 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:52 crc kubenswrapper[4949]: I0216 11:07:52.394290 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:52 crc kubenswrapper[4949]: I0216 11:07:52.394307 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:52Z","lastTransitionTime":"2026-02-16T11:07:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:52 crc kubenswrapper[4949]: I0216 11:07:52.497568 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:52 crc kubenswrapper[4949]: I0216 11:07:52.497610 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:52 crc kubenswrapper[4949]: I0216 11:07:52.497622 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:52 crc kubenswrapper[4949]: I0216 11:07:52.497640 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:52 crc kubenswrapper[4949]: I0216 11:07:52.497652 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:52Z","lastTransitionTime":"2026-02-16T11:07:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:52 crc kubenswrapper[4949]: I0216 11:07:52.601162 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:52 crc kubenswrapper[4949]: I0216 11:07:52.601256 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:52 crc kubenswrapper[4949]: I0216 11:07:52.601279 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:52 crc kubenswrapper[4949]: I0216 11:07:52.601309 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:52 crc kubenswrapper[4949]: I0216 11:07:52.601331 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:52Z","lastTransitionTime":"2026-02-16T11:07:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:52 crc kubenswrapper[4949]: I0216 11:07:52.703585 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:52 crc kubenswrapper[4949]: I0216 11:07:52.703636 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:52 crc kubenswrapper[4949]: I0216 11:07:52.703646 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:52 crc kubenswrapper[4949]: I0216 11:07:52.703669 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:52 crc kubenswrapper[4949]: I0216 11:07:52.703686 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:52Z","lastTransitionTime":"2026-02-16T11:07:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:52 crc kubenswrapper[4949]: I0216 11:07:52.806710 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:52 crc kubenswrapper[4949]: I0216 11:07:52.806765 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:52 crc kubenswrapper[4949]: I0216 11:07:52.806778 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:52 crc kubenswrapper[4949]: I0216 11:07:52.806799 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:52 crc kubenswrapper[4949]: I0216 11:07:52.806814 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:52Z","lastTransitionTime":"2026-02-16T11:07:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:52 crc kubenswrapper[4949]: I0216 11:07:52.909808 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:52 crc kubenswrapper[4949]: I0216 11:07:52.909863 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:52 crc kubenswrapper[4949]: I0216 11:07:52.909878 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:52 crc kubenswrapper[4949]: I0216 11:07:52.909901 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:52 crc kubenswrapper[4949]: I0216 11:07:52.909916 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:52Z","lastTransitionTime":"2026-02-16T11:07:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.013147 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.013208 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.013220 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.013239 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.013253 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:53Z","lastTransitionTime":"2026-02-16T11:07:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.115608 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.115646 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.115656 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.115668 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.115677 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:53Z","lastTransitionTime":"2026-02-16T11:07:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.219347 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.219405 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.219421 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.219444 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.219545 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:53Z","lastTransitionTime":"2026-02-16T11:07:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.235277 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:07:53 crc kubenswrapper[4949]: E0216 11:07:53.235441 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6" Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.322458 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.322516 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.322540 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.322570 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.322596 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:53Z","lastTransitionTime":"2026-02-16T11:07:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.339996 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-19 05:14:27.027031676 +0000 UTC Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.424859 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.424925 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.424948 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.424979 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.425001 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:53Z","lastTransitionTime":"2026-02-16T11:07:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.527265 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.527337 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.527372 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.527396 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.527412 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:53Z","lastTransitionTime":"2026-02-16T11:07:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.630163 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.630222 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.630236 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.630252 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.630263 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:53Z","lastTransitionTime":"2026-02-16T11:07:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.732703 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.732764 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.732782 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.732811 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.732832 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:53Z","lastTransitionTime":"2026-02-16T11:07:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.835999 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.836066 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.836078 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.836123 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.836150 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:53Z","lastTransitionTime":"2026-02-16T11:07:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.939599 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.939666 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.939687 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.939716 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:53 crc kubenswrapper[4949]: I0216 11:07:53.939737 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:53Z","lastTransitionTime":"2026-02-16T11:07:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.042736 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.042791 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.042808 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.042829 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.042845 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:54Z","lastTransitionTime":"2026-02-16T11:07:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.146060 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.146129 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.146147 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.146208 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.146226 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:54Z","lastTransitionTime":"2026-02-16T11:07:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.234307 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.234354 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.234310 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:07:54 crc kubenswrapper[4949]: E0216 11:07:54.234520 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:07:54 crc kubenswrapper[4949]: E0216 11:07:54.234814 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:07:54 crc kubenswrapper[4949]: E0216 11:07:54.234875 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.249582 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.249733 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.249754 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.249775 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.249791 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:54Z","lastTransitionTime":"2026-02-16T11:07:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.340552 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-05 19:25:24.738693634 +0000 UTC Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.352905 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.352963 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.352979 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.353674 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.353733 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:54Z","lastTransitionTime":"2026-02-16T11:07:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.456122 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.456192 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.456207 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.456225 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.456237 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:54Z","lastTransitionTime":"2026-02-16T11:07:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.558651 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.558735 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.558752 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.558770 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.558784 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:54Z","lastTransitionTime":"2026-02-16T11:07:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.661724 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.661770 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.661782 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.661802 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.661816 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:54Z","lastTransitionTime":"2026-02-16T11:07:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.763579 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.763636 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.763653 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.763676 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.763695 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:54Z","lastTransitionTime":"2026-02-16T11:07:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.866056 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.866299 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.866311 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.866332 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.866344 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:54Z","lastTransitionTime":"2026-02-16T11:07:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.968490 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.968561 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.968574 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.968591 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:54 crc kubenswrapper[4949]: I0216 11:07:54.968603 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:54Z","lastTransitionTime":"2026-02-16T11:07:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:55 crc kubenswrapper[4949]: I0216 11:07:55.071353 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:55 crc kubenswrapper[4949]: I0216 11:07:55.071385 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:55 crc kubenswrapper[4949]: I0216 11:07:55.071394 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:55 crc kubenswrapper[4949]: I0216 11:07:55.071407 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:55 crc kubenswrapper[4949]: I0216 11:07:55.071416 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:55Z","lastTransitionTime":"2026-02-16T11:07:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:55 crc kubenswrapper[4949]: I0216 11:07:55.173910 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:55 crc kubenswrapper[4949]: I0216 11:07:55.173955 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:55 crc kubenswrapper[4949]: I0216 11:07:55.173969 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:55 crc kubenswrapper[4949]: I0216 11:07:55.173982 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:55 crc kubenswrapper[4949]: I0216 11:07:55.173991 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:55Z","lastTransitionTime":"2026-02-16T11:07:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:55 crc kubenswrapper[4949]: I0216 11:07:55.234609 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:07:55 crc kubenswrapper[4949]: E0216 11:07:55.234872 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6" Feb 16 11:07:55 crc kubenswrapper[4949]: I0216 11:07:55.276457 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:55 crc kubenswrapper[4949]: I0216 11:07:55.276507 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:55 crc kubenswrapper[4949]: I0216 11:07:55.276520 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:55 crc kubenswrapper[4949]: I0216 11:07:55.276540 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:55 crc kubenswrapper[4949]: I0216 11:07:55.276553 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:55Z","lastTransitionTime":"2026-02-16T11:07:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:55 crc kubenswrapper[4949]: I0216 11:07:55.340909 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-28 08:12:30.90223153 +0000 UTC Feb 16 11:07:55 crc kubenswrapper[4949]: I0216 11:07:55.379019 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:55 crc kubenswrapper[4949]: I0216 11:07:55.379059 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:55 crc kubenswrapper[4949]: I0216 11:07:55.379072 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:55 crc kubenswrapper[4949]: I0216 11:07:55.379089 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:55 crc kubenswrapper[4949]: I0216 11:07:55.379100 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:55Z","lastTransitionTime":"2026-02-16T11:07:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:55 crc kubenswrapper[4949]: I0216 11:07:55.480809 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:55 crc kubenswrapper[4949]: I0216 11:07:55.480843 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:55 crc kubenswrapper[4949]: I0216 11:07:55.480852 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:55 crc kubenswrapper[4949]: I0216 11:07:55.480865 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:55 crc kubenswrapper[4949]: I0216 11:07:55.480875 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:55Z","lastTransitionTime":"2026-02-16T11:07:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:55 crc kubenswrapper[4949]: I0216 11:07:55.583976 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:55 crc kubenswrapper[4949]: I0216 11:07:55.584018 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:55 crc kubenswrapper[4949]: I0216 11:07:55.584028 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:55 crc kubenswrapper[4949]: I0216 11:07:55.584044 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:55 crc kubenswrapper[4949]: I0216 11:07:55.584056 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:55Z","lastTransitionTime":"2026-02-16T11:07:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:55 crc kubenswrapper[4949]: I0216 11:07:55.687937 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:55 crc kubenswrapper[4949]: I0216 11:07:55.688009 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:55 crc kubenswrapper[4949]: I0216 11:07:55.688029 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:55 crc kubenswrapper[4949]: I0216 11:07:55.688059 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:55 crc kubenswrapper[4949]: I0216 11:07:55.688077 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:55Z","lastTransitionTime":"2026-02-16T11:07:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:55 crc kubenswrapper[4949]: I0216 11:07:55.796550 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:55 crc kubenswrapper[4949]: I0216 11:07:55.796594 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:55 crc kubenswrapper[4949]: I0216 11:07:55.796604 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:55 crc kubenswrapper[4949]: I0216 11:07:55.796620 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:55 crc kubenswrapper[4949]: I0216 11:07:55.796632 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:55Z","lastTransitionTime":"2026-02-16T11:07:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:55 crc kubenswrapper[4949]: I0216 11:07:55.898760 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:55 crc kubenswrapper[4949]: I0216 11:07:55.898801 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:55 crc kubenswrapper[4949]: I0216 11:07:55.898813 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:55 crc kubenswrapper[4949]: I0216 11:07:55.898829 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:55 crc kubenswrapper[4949]: I0216 11:07:55.898843 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:55Z","lastTransitionTime":"2026-02-16T11:07:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.001210 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.001248 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.001283 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.001303 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.001315 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:56Z","lastTransitionTime":"2026-02-16T11:07:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.103629 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.103671 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.103680 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.103695 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.103705 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:56Z","lastTransitionTime":"2026-02-16T11:07:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.206895 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.206982 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.207000 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.207023 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.207042 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:56Z","lastTransitionTime":"2026-02-16T11:07:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.234586 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.234604 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.234687 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:07:56 crc kubenswrapper[4949]: E0216 11:07:56.234810 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:07:56 crc kubenswrapper[4949]: E0216 11:07:56.234846 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:07:56 crc kubenswrapper[4949]: E0216 11:07:56.234922 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.309631 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.309680 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.309696 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.309718 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.309735 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:56Z","lastTransitionTime":"2026-02-16T11:07:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.341250 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-25 05:36:35.72610524 +0000 UTC Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.413049 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.413093 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.413106 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.413125 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.413140 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:56Z","lastTransitionTime":"2026-02-16T11:07:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.515874 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.515914 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.515925 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.515940 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.515951 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:56Z","lastTransitionTime":"2026-02-16T11:07:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.645279 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.645364 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.645379 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.645401 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.645413 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:56Z","lastTransitionTime":"2026-02-16T11:07:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.747485 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.747530 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.747544 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.747561 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.747602 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:56Z","lastTransitionTime":"2026-02-16T11:07:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.850201 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.850242 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.850254 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.850274 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.850288 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:56Z","lastTransitionTime":"2026-02-16T11:07:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.953146 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.953211 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.953225 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.953241 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:56 crc kubenswrapper[4949]: I0216 11:07:56.953252 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:56Z","lastTransitionTime":"2026-02-16T11:07:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.055346 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.055391 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.055403 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.055422 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.055436 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:57Z","lastTransitionTime":"2026-02-16T11:07:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.158454 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.158501 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.158513 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.158531 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.158543 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:57Z","lastTransitionTime":"2026-02-16T11:07:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.234432 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:07:57 crc kubenswrapper[4949]: E0216 11:07:57.234611 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6" Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.260921 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.260969 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.260981 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.260997 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.261010 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:57Z","lastTransitionTime":"2026-02-16T11:07:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.341767 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-25 20:31:01.691726764 +0000 UTC Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.363727 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.363763 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.363796 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.363811 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.363822 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:57Z","lastTransitionTime":"2026-02-16T11:07:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.466522 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.466568 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.466581 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.466602 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.466613 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:57Z","lastTransitionTime":"2026-02-16T11:07:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.569226 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.569273 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.569284 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.569298 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.569307 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:57Z","lastTransitionTime":"2026-02-16T11:07:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.671829 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.671923 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.671940 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.671957 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.671967 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:57Z","lastTransitionTime":"2026-02-16T11:07:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.774655 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.774717 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.774727 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.774741 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.774750 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:57Z","lastTransitionTime":"2026-02-16T11:07:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.876979 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.877019 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.877036 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.877052 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.877063 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:57Z","lastTransitionTime":"2026-02-16T11:07:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.979709 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.979769 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.979841 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.979873 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:57 crc kubenswrapper[4949]: I0216 11:07:57.979888 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:57Z","lastTransitionTime":"2026-02-16T11:07:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.083254 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.083300 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.083308 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.083324 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.083336 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:58Z","lastTransitionTime":"2026-02-16T11:07:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.186504 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.186558 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.186572 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.186597 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.186610 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:58Z","lastTransitionTime":"2026-02-16T11:07:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.235011 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.235123 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.235231 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:07:58 crc kubenswrapper[4949]: E0216 11:07:58.235335 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:07:58 crc kubenswrapper[4949]: E0216 11:07:58.235460 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:07:58 crc kubenswrapper[4949]: E0216 11:07:58.235573 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.289206 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.289263 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.289276 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.289300 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.289313 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:58Z","lastTransitionTime":"2026-02-16T11:07:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.342575 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-15 02:57:41.746689513 +0000 UTC Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.392224 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.392285 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.392307 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.392329 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.392345 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:58Z","lastTransitionTime":"2026-02-16T11:07:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.495280 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.495332 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.495342 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.495362 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.495381 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:58Z","lastTransitionTime":"2026-02-16T11:07:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.597942 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.598010 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.598028 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.598052 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.598068 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:58Z","lastTransitionTime":"2026-02-16T11:07:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.648162 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.648252 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.648272 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.648311 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.648329 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:58Z","lastTransitionTime":"2026-02-16T11:07:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:58 crc kubenswrapper[4949]: E0216 11:07:58.665962 4949 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc77a723-71f9-4f4a-b80e-2feb50c63f04\\\",\\\"systemUUID\\\":\\\"fcf7eef6-e236-4c8e-bd9c-41b70a7621ed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:58Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.670104 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.670151 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
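[Annotation] The failed patch above is the first point in this trace where the problem is not the missing CNI configuration: the node-status update is rejected because the node.network-node-identity.openshift.io webhook at https://127.0.0.1:9743 presents a serving certificate that expired on 2025-08-24, while the node's clock reads 2026-02-16, so TLS verification fails before the patch is ever applied. The same x509 validity check can be reproduced with a hypothetical standalone diagnostic like the sketch below (not OpenShift code), which fetches the peer certificate without verification and compares its validity window to the clock.

```go
// certexpiry.go: hypothetical diagnostic mirroring the x509 failure in the
// log ("certificate has expired or is not yet valid").
package main

import (
	"crypto/tls"
	"fmt"
	"time"
)

func main() {
	// Skip verification so an already-expired certificate can be inspected.
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()

	cert := conn.ConnectionState().PeerCertificates[0]
	now := time.Now()
	fmt.Printf("NotBefore=%s NotAfter=%s\n", cert.NotBefore, cert.NotAfter)
	if now.After(cert.NotAfter) {
		// The condition behind the kubelet's "failed to verify certificate".
		fmt.Printf("certificate expired: current time %s is after %s\n",
			now.Format(time.RFC3339), cert.NotAfter.Format(time.RFC3339))
	}
}
```

Every subsequent "Error updating node status, will retry" entry in this trace fails identically; until that webhook certificate is renewed, the kubelet can record the NotReady condition locally but cannot persist it through the API server.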
event="NodeHasNoDiskPressure" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.670167 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.670216 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.670239 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:58Z","lastTransitionTime":"2026-02-16T11:07:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:58 crc kubenswrapper[4949]: E0216 11:07:58.685417 4949 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc77a723-71f9-4f4a-b80e-2feb50c63f04\\\",\\\"systemUUID\\\":\\\"fcf7eef6-e236-4c8e-bd9c-41b70a7621ed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:58Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.690424 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.690452 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.690461 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.690484 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.690502 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:58Z","lastTransitionTime":"2026-02-16T11:07:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:58 crc kubenswrapper[4949]: E0216 11:07:58.702617 4949 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc77a723-71f9-4f4a-b80e-2feb50c63f04\\\",\\\"systemUUID\\\":\\\"fcf7eef6-e236-4c8e-bd9c-41b70a7621ed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:58Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.706478 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.706506 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.706533 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.706550 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.706561 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:58Z","lastTransitionTime":"2026-02-16T11:07:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:58 crc kubenswrapper[4949]: E0216 11:07:58.720233 4949 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc77a723-71f9-4f4a-b80e-2feb50c63f04\\\",\\\"systemUUID\\\":\\\"fcf7eef6-e236-4c8e-bd9c-41b70a7621ed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:58Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.724356 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.724454 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.724472 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.724501 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.724514 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:58Z","lastTransitionTime":"2026-02-16T11:07:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:58 crc kubenswrapper[4949]: E0216 11:07:58.737291 4949 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:07:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc77a723-71f9-4f4a-b80e-2feb50c63f04\\\",\\\"systemUUID\\\":\\\"fcf7eef6-e236-4c8e-bd9c-41b70a7621ed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:07:58Z is after 2025-08-24T17:21:41Z" Feb 16 11:07:58 crc kubenswrapper[4949]: E0216 11:07:58.737462 4949 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.739372 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.739408 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.739419 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.739434 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.739448 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:58Z","lastTransitionTime":"2026-02-16T11:07:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.842303 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.842350 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.842361 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.842382 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.842397 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:58Z","lastTransitionTime":"2026-02-16T11:07:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.945163 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.945250 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.945264 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.945281 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:58 crc kubenswrapper[4949]: I0216 11:07:58.945292 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:58Z","lastTransitionTime":"2026-02-16T11:07:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.049105 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.049213 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.049227 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.049245 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.049276 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:59Z","lastTransitionTime":"2026-02-16T11:07:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.112243 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/965b4f20-8786-4c47-8721-c348942551d6-metrics-certs\") pod \"network-metrics-daemon-6v4x7\" (UID: \"965b4f20-8786-4c47-8721-c348942551d6\") " pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:07:59 crc kubenswrapper[4949]: E0216 11:07:59.112480 4949 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Feb 16 11:07:59 crc kubenswrapper[4949]: E0216 11:07:59.112610 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/965b4f20-8786-4c47-8721-c348942551d6-metrics-certs podName:965b4f20-8786-4c47-8721-c348942551d6 nodeName:}" failed. No retries permitted until 2026-02-16 11:08:31.112582512 +0000 UTC m=+100.741916777 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/965b4f20-8786-4c47-8721-c348942551d6-metrics-certs") pod "network-metrics-daemon-6v4x7" (UID: "965b4f20-8786-4c47-8721-c348942551d6") : object "openshift-multus"/"metrics-daemon-secret" not registered Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.151698 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.151745 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.151757 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.151777 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.151795 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:59Z","lastTransitionTime":"2026-02-16T11:07:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.234525 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:07:59 crc kubenswrapper[4949]: E0216 11:07:59.234704 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6" Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.254893 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.254959 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.254971 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.254997 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.255011 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:59Z","lastTransitionTime":"2026-02-16T11:07:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.342738 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-18 11:36:17.178330082 +0000 UTC Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.356944 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.357009 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.357032 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.357062 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.357085 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:59Z","lastTransitionTime":"2026-02-16T11:07:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.460429 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.460480 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.460492 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.460515 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.460530 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:59Z","lastTransitionTime":"2026-02-16T11:07:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.564025 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.564071 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.564087 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.564108 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.564125 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:59Z","lastTransitionTime":"2026-02-16T11:07:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.667362 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.667423 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.667436 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.667454 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.667465 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:59Z","lastTransitionTime":"2026-02-16T11:07:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.770077 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.770139 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.770162 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.770236 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.770259 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:59Z","lastTransitionTime":"2026-02-16T11:07:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.873533 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.873575 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.873591 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.873613 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.873629 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:59Z","lastTransitionTime":"2026-02-16T11:07:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.976247 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.976300 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.976313 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.976336 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:07:59 crc kubenswrapper[4949]: I0216 11:07:59.976352 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:07:59Z","lastTransitionTime":"2026-02-16T11:07:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:00 crc kubenswrapper[4949]: I0216 11:08:00.079089 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:00 crc kubenswrapper[4949]: I0216 11:08:00.079132 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:00 crc kubenswrapper[4949]: I0216 11:08:00.079143 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:00 crc kubenswrapper[4949]: I0216 11:08:00.079159 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:00 crc kubenswrapper[4949]: I0216 11:08:00.079187 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:00Z","lastTransitionTime":"2026-02-16T11:08:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:00 crc kubenswrapper[4949]: I0216 11:08:00.181681 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:00 crc kubenswrapper[4949]: I0216 11:08:00.181745 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:00 crc kubenswrapper[4949]: I0216 11:08:00.181760 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:00 crc kubenswrapper[4949]: I0216 11:08:00.181785 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:00 crc kubenswrapper[4949]: I0216 11:08:00.181802 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:00Z","lastTransitionTime":"2026-02-16T11:08:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:00 crc kubenswrapper[4949]: I0216 11:08:00.235156 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:08:00 crc kubenswrapper[4949]: I0216 11:08:00.235266 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:08:00 crc kubenswrapper[4949]: E0216 11:08:00.235358 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:08:00 crc kubenswrapper[4949]: I0216 11:08:00.235289 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:08:00 crc kubenswrapper[4949]: E0216 11:08:00.235457 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:08:00 crc kubenswrapper[4949]: E0216 11:08:00.235540 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:08:00 crc kubenswrapper[4949]: I0216 11:08:00.284427 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:00 crc kubenswrapper[4949]: I0216 11:08:00.284479 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:00 crc kubenswrapper[4949]: I0216 11:08:00.284492 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:00 crc kubenswrapper[4949]: I0216 11:08:00.284513 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:00 crc kubenswrapper[4949]: I0216 11:08:00.284529 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:00Z","lastTransitionTime":"2026-02-16T11:08:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:00 crc kubenswrapper[4949]: I0216 11:08:00.342934 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-17 10:08:42.90167292 +0000 UTC Feb 16 11:08:00 crc kubenswrapper[4949]: I0216 11:08:00.388048 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:00 crc kubenswrapper[4949]: I0216 11:08:00.388100 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:00 crc kubenswrapper[4949]: I0216 11:08:00.388112 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:00 crc kubenswrapper[4949]: I0216 11:08:00.388134 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:00 crc kubenswrapper[4949]: I0216 11:08:00.388151 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:00Z","lastTransitionTime":"2026-02-16T11:08:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:00 crc kubenswrapper[4949]: I0216 11:08:00.490846 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:00 crc kubenswrapper[4949]: I0216 11:08:00.490898 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:00 crc kubenswrapper[4949]: I0216 11:08:00.490913 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:00 crc kubenswrapper[4949]: I0216 11:08:00.490936 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:00 crc kubenswrapper[4949]: I0216 11:08:00.490948 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:00Z","lastTransitionTime":"2026-02-16T11:08:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:00 crc kubenswrapper[4949]: I0216 11:08:00.594147 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:00 crc kubenswrapper[4949]: I0216 11:08:00.594228 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:00 crc kubenswrapper[4949]: I0216 11:08:00.594237 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:00 crc kubenswrapper[4949]: I0216 11:08:00.594255 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:00 crc kubenswrapper[4949]: I0216 11:08:00.594267 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:00Z","lastTransitionTime":"2026-02-16T11:08:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:00 crc kubenswrapper[4949]: I0216 11:08:00.699392 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:00 crc kubenswrapper[4949]: I0216 11:08:00.699439 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:00 crc kubenswrapper[4949]: I0216 11:08:00.699450 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:00 crc kubenswrapper[4949]: I0216 11:08:00.699467 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:00 crc kubenswrapper[4949]: I0216 11:08:00.699480 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:00Z","lastTransitionTime":"2026-02-16T11:08:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:00 crc kubenswrapper[4949]: I0216 11:08:00.801377 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:00 crc kubenswrapper[4949]: I0216 11:08:00.801408 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:00 crc kubenswrapper[4949]: I0216 11:08:00.801417 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:00 crc kubenswrapper[4949]: I0216 11:08:00.801431 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:00 crc kubenswrapper[4949]: I0216 11:08:00.801441 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:00Z","lastTransitionTime":"2026-02-16T11:08:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:00 crc kubenswrapper[4949]: I0216 11:08:00.903753 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:00 crc kubenswrapper[4949]: I0216 11:08:00.903795 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:00 crc kubenswrapper[4949]: I0216 11:08:00.903804 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:00 crc kubenswrapper[4949]: I0216 11:08:00.903817 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:00 crc kubenswrapper[4949]: I0216 11:08:00.903827 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:00Z","lastTransitionTime":"2026-02-16T11:08:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.006328 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.006358 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.006368 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.006381 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.006391 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:01Z","lastTransitionTime":"2026-02-16T11:08:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.109609 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.109638 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.109648 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.109661 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.109670 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:01Z","lastTransitionTime":"2026-02-16T11:08:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.214156 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.214214 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.214226 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.214250 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.214262 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:01Z","lastTransitionTime":"2026-02-16T11:08:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.236783 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:08:01 crc kubenswrapper[4949]: E0216 11:08:01.236913 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.256082 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cs472" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75391841-4aa8-4f03-b696-23ac283642c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a92a94b385e42e72f4c48c4c2a8ddba220c077158d0f72e099e562d3b84bf70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bql55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3201fb16e33f256fad3eed7af0783c71ccb51ca75e6734c0745fada820853b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bql55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cs472\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:01Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.256624 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.270637 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6v4x7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"965b4f20-8786-4c47-8721-c348942551d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktpql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktpql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:27Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6v4x7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:01Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.281739 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:01Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.294197 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with 
unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:01Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.305087 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"39ca5ab7-457c-4404-a3eb-f6acce74843b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d3e706e8ad75d6c7419dc41073b0842faf175f1e153735a12e3ee7243676253\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recurs
iveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4978ec698681f01249736b54aa5a1fba37fc94f3667b735c99bdfb97d3bd3380\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-26lss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:01Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.318877 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.318915 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.318952 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.318972 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.318984 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:01Z","lastTransitionTime":"2026-02-16T11:08:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.319730 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ff1c788-4b66-48e9-8178-006f231d264c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87041b3d12c1149f97ab42c0edec7c62aaee2e50309ad8c236b1842a6164bf79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vjrxd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:01Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.329501 4949 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-image-registry/node-ca-kvrsd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e1092c7-896f-4334-b157-ac007cdb0d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a5d6a2719dc30352250a07c1028c552b8f73c26d421b869f2e8ddc74fb9dcd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8v7hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:15Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kvrsd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:01Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.342758 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1997fe9cefd6bfcd634b77dad76d5d7c814318127f2be93b489d7866808c57ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:01Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.343032 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-06 23:02:45.642633129 +0000 UTC Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.353872 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:01Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.364417 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kxn9z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc3f701c-2094-4818-871c-547fc5636a55\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://993e593b3f31361b71cff339c39e1bc66b6fbbeb8a1d125976254ec852dcd919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rld7r\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kxn9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:01Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.376367 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jsmls" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e42a398-f83a-4463-9ab7-4e093e80d744\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3e0579c66283751490a28d6f93146aa4b7a000219c17747fe2041a778fe2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.
d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7q7gk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jsmls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:01Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.389373 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aada2690-8d5f-4854-bc83-59906010e8ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-reg
eneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4e6265ce5aefd48ee356fb002a61e38aed11e42968db8945ffeaba1d7ee3c9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-16T11:07:11Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0216 11:07:05.819935 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0216 11:07:05.820709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2063297113/tls.crt::/tmp/serving-cert-2063297113/tls.key\\\\\\\"\\\\nI0216 11:07:11.242838 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0216 11:07:11.246739 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0216 11:07:11.246759 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0216 11:07:11.246780 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0216 11:07:11.246785 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0216 11:07:11.257100 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0216 11:07:11.257140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0216 11:07:11.257148 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0216 11:07:11.257148 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0216 11:07:11.257158 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0216 11:07:11.257195 1 secure_serving.go:69] Use of insecure cipher 
'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0216 11:07:11.257203 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0216 11:07:11.257208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0216 11:07:11.259242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:01Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.401393 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"abcc0a5c-3dfa-48fe-9df1-4f941d9d811c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afad26039cce493d107df9286cf3268dfc5f76d20b86bb34a36ef7742b8419bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68519f3c9bfc45ad28f92e2cf0c28a9413821784aafd91ab65a311259ed6ecf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9f247fc563e96b2d5c1e866afe8eef00f61520018ad001e6b02cffac286d3ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2296e3c50624ffcfe10eb50ae71f715af1868bfa777c60068d3ef55a2544af00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:01Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.411142 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eff651eda112458195ec8e6bc6ecdf362f44a44f7f8be136f631153251f278c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-02-16T11:08:01Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.421674 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.421718 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.421730 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.421747 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.421761 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:01Z","lastTransitionTime":"2026-02-16T11:08:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.429362 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c16c3ec1c264807a71bba82352d13478aabcbae9eee409569b1de207fd89131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6b376f8bceb83918f978008daff9cea37a309102416bb94a209bf286a7fe3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e903800152f2a61249dd315080aec7aff0fb0f96a2d687f4903ee211ac061080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b36100d71aa9c4c492c12006bd25f19aa6f8e8b39048b6e895acbadf3a3d5b5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6614345cd2b9b48d07439200086ab97d851abf34d3fea2b29a5073f224184ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c30142bc1aa2477b0821702607a40da4bd125f14ce8bdd4d6a555481f4be477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://522f14e08f7f310ebb174df27f8bc434d591ff35
79ba4c5f1d569497cdef4a11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://522f14e08f7f310ebb174df27f8bc434d591ff3579ba4c5f1d569497cdef4a11\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-16T11:07:37Z\\\",\\\"message\\\":\\\"07:37.036667 6594 default_network_controller.go:776] Recording success event on pod openshift-image-registry/node-ca-kvrsd\\\\nI0216 11:07:37.036668 6594 factory.go:656] Stopping watch factory\\\\nI0216 11:07:37.036649 6594 obj_retry.go:303] Retry object setup: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0216 11:07:37.036677 6594 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0216 11:07:37.036680 6594 ovnkube.go:599] Stopped ovnkube\\\\nI0216 11:07:37.036681 6594 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nI0216 11:07:37.036687 6594 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI0216 11:07:37.036691 6594 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0216 11:07:37.036698 6594 obj_retry.go:420] Function iterateRetryResources for *v1.Pod ended (in 165.675µs)\\\\nI0216 11:07:37.036700 6594 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0216 11:07:37.036709 6594 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0216 11:07:37.036760 6594 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-gfr2q_openshift-ovn-kubernetes(3f545ae8-1b14-4abd-b4ea-844f6ae7b54d)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47947db986c5433367c6b52570a017def2e4babf72f785111587ca86fd745795\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gfr2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:01Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.440375 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5554e66b-3309-4de5-b983-47e39d83fd75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b0f44fbd5ac8ebc2187917178b0999e0eb7837a1c097691c9667669486f5a01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e29b88847d04a477cf98987a7df615c6de0a6d9749cf7bdac9569f10360fc5e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c
97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb35c1de6926d4a26d187408bff5edd0286e53989f9b76e8c589428b397f9f13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f2ffd2fb061548169d3abaf8402343cafbc3e5ae03a850efdbd2d58416044185\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f2ffd2fb061548169d3abaf8402343cafbc3e5ae03a850efdbd2d58416044185\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:01Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.454532 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://317e85af8628e3ae9d104c6faacfd4ce6b66adc7d3f0e426433f60118fffa894\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://43a9246b1c43b2bfb063c1d8aded507f620472bb98ffc99e91115e1189478807\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:01Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.523591 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.523629 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.523640 4949 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.523655 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.523669 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:01Z","lastTransitionTime":"2026-02-16T11:08:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.626136 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.626194 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.626203 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.626218 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.626228 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:01Z","lastTransitionTime":"2026-02-16T11:08:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.732156 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.732218 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.732229 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.732242 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.732253 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:01Z","lastTransitionTime":"2026-02-16T11:08:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.834667 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.834739 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.834760 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.834789 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.834806 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:01Z","lastTransitionTime":"2026-02-16T11:08:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.937305 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.937346 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.937357 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.937372 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:01 crc kubenswrapper[4949]: I0216 11:08:01.937382 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:01Z","lastTransitionTime":"2026-02-16T11:08:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.039545 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.039597 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.039610 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.039634 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.039650 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:02Z","lastTransitionTime":"2026-02-16T11:08:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.142560 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.142627 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.142642 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.142674 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.142691 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:02Z","lastTransitionTime":"2026-02-16T11:08:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.234807 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.234829 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.234836 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:08:02 crc kubenswrapper[4949]: E0216 11:08:02.234942 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:08:02 crc kubenswrapper[4949]: E0216 11:08:02.235034 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:08:02 crc kubenswrapper[4949]: E0216 11:08:02.235163 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.245380 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.245421 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.245429 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.245444 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.245453 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:02Z","lastTransitionTime":"2026-02-16T11:08:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.344057 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-17 08:45:34.210545708 +0000 UTC Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.347643 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.347677 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.347686 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.347699 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.347707 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:02Z","lastTransitionTime":"2026-02-16T11:08:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.449596 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.449634 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.449642 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.449658 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.449667 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:02Z","lastTransitionTime":"2026-02-16T11:08:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.552562 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.552619 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.552633 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.552651 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.552670 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:02Z","lastTransitionTime":"2026-02-16T11:08:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.654864 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.654931 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.654946 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.654967 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.654979 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:02Z","lastTransitionTime":"2026-02-16T11:08:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.757835 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.757894 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.757911 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.757934 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.757949 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:02Z","lastTransitionTime":"2026-02-16T11:08:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.861592 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.861658 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.861671 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.861691 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.861705 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:02Z","lastTransitionTime":"2026-02-16T11:08:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.963788 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.963824 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.963833 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.963847 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:02 crc kubenswrapper[4949]: I0216 11:08:02.963857 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:02Z","lastTransitionTime":"2026-02-16T11:08:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.014647 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-jsmls_3e42a398-f83a-4463-9ab7-4e093e80d744/kube-multus/0.log" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.014725 4949 generic.go:334] "Generic (PLEG): container finished" podID="3e42a398-f83a-4463-9ab7-4e093e80d744" containerID="9e3e0579c66283751490a28d6f93146aa4b7a000219c17747fe2041a778fe2ff" exitCode=1 Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.014773 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-jsmls" event={"ID":"3e42a398-f83a-4463-9ab7-4e093e80d744","Type":"ContainerDied","Data":"9e3e0579c66283751490a28d6f93146aa4b7a000219c17747fe2041a778fe2ff"} Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.015393 4949 scope.go:117] "RemoveContainer" containerID="9e3e0579c66283751490a28d6f93146aa4b7a000219c17747fe2041a778fe2ff" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.030285 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ff1c788-4b66-48e9-8178-006f231d264c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87041b3d12c1149f97ab42c0edec7c62aaee2e50309ad8c236b1842a6164bf79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\"
,\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\"
:true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPa
th\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vjrxd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:03Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.044997 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kvrsd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e1092c7-896f-4334-b157-ac007cdb0d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a5d6a2719dc30352250a07c1028c552b8f73c26d421b869f2e8ddc74fb9dcd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8v7hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:15Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kvrsd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:03Z is after 2025-08-24T17:21:41Z" Feb 16 
11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.057692 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1997fe9cefd6bfcd634b77dad76d5d7c814318127f2be93b489d7866808c57ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:03Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.068512 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:03Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.068776 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.068834 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.068844 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.068894 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.068909 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:03Z","lastTransitionTime":"2026-02-16T11:08:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.079321 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kxn9z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc3f701c-2094-4818-871c-547fc5636a55\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://993e593b3f31361b71cff339c39e1bc66b6fbbeb8a1d125976254ec852dcd919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rld7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kxn9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:03Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.091090 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jsmls" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e42a398-f83a-4463-9ab7-4e093e80d744\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3e0579c66283751490a28d6f93146aa4b7a000219c17747fe2041a778fe2ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e3e0579c66283751490a28d6f93146aa4b7a000219c17747fe2041a778fe2ff\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-16T11:08:02Z\\\",\\\"message\\\":\\\"2026-02-16T11:07:17+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_7dce261c-32ce-4859-b0c0-e6188b149e57\\\\n2026-02-16T11:07:17+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_7dce261c-32ce-4859-b0c0-e6188b149e57 to /host/opt/cni/bin/\\\\n2026-02-16T11:07:17Z [verbose] multus-daemon started\\\\n2026-02-16T11:07:17Z [verbose] Readiness Indicator file check\\\\n2026-02-16T11:08:02Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7q7gk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jsmls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:03Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.105685 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aada2690-8d5f-4854-bc83-59906010e8ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4e6265ce5aefd48ee356fb002a61e38aed11e42968db8945ffeaba1d7ee3c9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-16T11:07:11Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0216 11:07:05.819935 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0216 11:07:05.820709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2063297113/tls.crt::/tmp/serving-cert-2063297113/tls.key\\\\\\\"\\\\nI0216 11:07:11.242838 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0216 11:07:11.246739 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0216 11:07:11.246759 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0216 11:07:11.246780 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0216 11:07:11.246785 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0216 11:07:11.257100 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0216 11:07:11.257140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0216 11:07:11.257148 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0216 11:07:11.257148 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0216 11:07:11.257158 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0216 11:07:11.257195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0216 11:07:11.257203 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0216 11:07:11.257208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0216 11:07:11.259242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:03Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.120454 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"abcc0a5c-3dfa-48fe-9df1-4f941d9d811c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afad26039cce493d107df9286cf3268dfc5f76d20b86bb34a36ef7742b8419bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68519f3c9bfc45ad28f92e2cf0c28a9413821784aafd91ab65a311259ed6ecf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9f247fc563e96b2d5c1e866afe8eef00f61520018ad001e6b02cffac286d3ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2296e3c50624ffcfe10eb50ae71f715af1868bfa777c60068d3ef55a2544af00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:03Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.131853 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eff651eda112458195ec8e6bc6ecdf362f44a44f7f8be136f631153251f278c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-02-16T11:08:03Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.148251 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c16c3ec1c264807a71bba82352d13478aabcbae9eee409569b1de207fd89131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6b376f8bceb83918f978008daff9cea37a309102416bb94a209bf286a7fe3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"
recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e903800152f2a61249dd315080aec7aff0fb0f96a2d687f4903ee211ac061080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b36100d71aa9c4c492c12006bd25f19aa6f8e8b39048b6e895acbadf3a3d5b5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6614345cd2b9b48d07439200086ab97d851abf34d3fea2b29a5073f224184ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c30142bc1aa2477b0821702607a40da4bd125f14ce8bdd4d6a555481f4be477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257
453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://522f14e08f7f310ebb174df27f8bc434d591ff3579ba4c5f1d569497cdef4a11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://522f14e08f7f310ebb174df27f8bc434d591ff3579ba4c5f1d569497cdef4a11\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-16T11:07:37Z\\\",\\\"message\\\":\\\"07:37.036667 6594 default_network_controller.go:776] Recording success event on pod openshift-image-registry/node-ca-kvrsd\\\\nI0216 11:07:37.036668 6594 factory.go:656] Stopping watch factory\\\\nI0216 11:07:37.036649 6594 obj_retry.go:303] Retry object setup: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0216 11:07:37.036677 6594 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0216 11:07:37.036680 6594 ovnkube.go:599] Stopped ovnkube\\\\nI0216 11:07:37.036681 6594 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nI0216 11:07:37.036687 6594 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI0216 11:07:37.036691 6594 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0216 11:07:37.036698 6594 obj_retry.go:420] Function iterateRetryResources for *v1.Pod ended (in 165.675µs)\\\\nI0216 11:07:37.036700 6594 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0216 11:07:37.036709 6594 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0216 11:07:37.036760 6594 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed 
container=ovnkube-controller pod=ovnkube-node-gfr2q_openshift-ovn-kubernetes(3f545ae8-1b14-4abd-b4ea-844f6ae7b54d)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47947db986c5433367c6b52570a017def2e4babf72f785111587ca86fd745795\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"image\\\":\
\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gfr2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:03Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.159749 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b045625-b94b-4268-b1f3-4a735fc15ace\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://26a3c0d2164b3d6a615dce1de93b44a4a85b1831d656a20e5ce11909fad6d776\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cfced29d64a36faa7
fb01921de2fa10276f96c8bda9ca028442ff93523a0e3e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfced29d64a36faa7fb01921de2fa10276f96c8bda9ca028442ff93523a0e3e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:03Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.170560 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5554e66b-3309-4de5-b983-47e39d83fd75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b0f44fbd5ac8ebc2187917178b0999e0eb7837a1c097691c9667669486f5a01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e29b88847d04a477cf98987a7df615c6de0a6d9749cf7bdac9569f10360fc5e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"q
uay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb35c1de6926d4a26d187408bff5edd0286e53989f9b76e8c589428b397f9f13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f2ffd2fb061548169d3abaf8402343cafbc3e5ae03a850efdbd2d58416044185\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f2ffd2fb061548169d3abaf8402343cafbc3e5ae03a850efdbd2d58416044185\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:03Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.171069 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.171097 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.171109 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.171126 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:03 crc 
kubenswrapper[4949]: I0216 11:08:03.171137 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:03Z","lastTransitionTime":"2026-02-16T11:08:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.183594 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://317e85af8628e3ae9d104c6faacfd4ce6b66adc7d3f0e426433f60118fffa894\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://43a9246b1c43b2bfb063c1d8aded507f620472bb98ffc99e91115e1189478807\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:03Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.193444 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cs472" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75391841-4aa8-4f03-b696-23ac283642c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a92a94b385e42e72f4c48c4c2a8ddba220c077158d0f72e099e562d3b84bf70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bql55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3201fb16e33f256fad3eed7af0783c71ccb51ca75e6734c0745fada820853b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bql55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startT
ime\\\":\\\"2026-02-16T11:07:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cs472\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:03Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.205494 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6v4x7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"965b4f20-8786-4c47-8721-c348942551d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktpql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktpql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:27Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6v4x7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": 
tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:03Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.218457 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:03Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.231152 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:03Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.234631 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:08:03 crc kubenswrapper[4949]: E0216 11:08:03.234724 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.243674 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"39ca5ab7-457c-4404-a3eb-f6acce74843b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d3e706e8ad75d6c7419dc41073b0842faf175f1e153735a12e3ee7243676253\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4978ec698681f01249736b54aa5a1fba37fc94f3667b735c99bdfb97d3bd3380\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-26lss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:03Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.273438 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.273480 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.273491 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.273509 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.273521 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:03Z","lastTransitionTime":"2026-02-16T11:08:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.345275 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-08 15:01:41.138577851 +0000 UTC Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.376190 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.376229 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.376238 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.376251 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.376261 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:03Z","lastTransitionTime":"2026-02-16T11:08:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.479704 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.479748 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.479760 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.479778 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.479790 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:03Z","lastTransitionTime":"2026-02-16T11:08:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.582613 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.582674 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.582690 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.582708 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.582721 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:03Z","lastTransitionTime":"2026-02-16T11:08:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.685430 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.685480 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.685496 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.685517 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.685534 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:03Z","lastTransitionTime":"2026-02-16T11:08:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.787878 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.788211 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.788227 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.788247 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.788261 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:03Z","lastTransitionTime":"2026-02-16T11:08:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.890905 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.890964 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.890988 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.891013 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.891028 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:03Z","lastTransitionTime":"2026-02-16T11:08:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.993818 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.993871 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.993880 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.993901 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:03 crc kubenswrapper[4949]: I0216 11:08:03.993912 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:03Z","lastTransitionTime":"2026-02-16T11:08:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.021769 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-jsmls_3e42a398-f83a-4463-9ab7-4e093e80d744/kube-multus/0.log" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.021859 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-jsmls" event={"ID":"3e42a398-f83a-4463-9ab7-4e093e80d744","Type":"ContainerStarted","Data":"e24db1cec1807881a31cfb6501695acefa892c247aa78d342eb43cb6a9ed32ee"} Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.044808 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://317e85af8628e3ae9d104c6faacfd4ce6b66adc7d3f0e426433f60118fffa894\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://43a9246b1c43b2bfb063c1d8aded507f620472bb98ffc99e91115e1189478807\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:04Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.067530 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eff651eda112458195ec8e6bc6ecdf362f44a44f7f8be136f631153251f278c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:04Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.093609 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c16c3ec1c264807a71bba82352d13478aabcbae9eee409569b1de207fd89131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6b376f8bceb83918f978008daff9cea37a309102416bb94a209bf286a7fe3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e903800152f2a61249dd315080aec7aff0fb0f96a2d687f4903ee211ac061080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b36100d71aa9c4c492c12006bd25f19aa6f8e8b39048b6e895acbadf3a3d5b5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6614345cd2b9b48d07439200086ab97d851abf34d3fea2b29a5073f224184ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c30142bc1aa2477b0821702607a40da4bd125f14ce8bdd4d6a555481f4be477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://522f14e08f7f310ebb174df27f8bc434d591ff3579ba4c5f1d569497cdef4a11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://522f14e08f7f310ebb174df27f8bc434d591ff3579ba4c5f1d569497cdef4a11\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-16T11:07:37Z\\\",\\\"message\\\":\\\"07:37.036667 6594 default_network_controller.go:776] Recording success event on pod openshift-image-registry/node-ca-kvrsd\\\\nI0216 11:07:37.036668 6594 factory.go:656] Stopping watch factory\\\\nI0216 11:07:37.036649 6594 obj_retry.go:303] Retry object setup: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0216 11:07:37.036677 6594 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0216 11:07:37.036680 6594 ovnkube.go:599] Stopped ovnkube\\\\nI0216 11:07:37.036681 6594 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nI0216 11:07:37.036687 6594 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI0216 11:07:37.036691 6594 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0216 11:07:37.036698 6594 obj_retry.go:420] Function iterateRetryResources for *v1.Pod ended (in 165.675µs)\\\\nI0216 11:07:37.036700 6594 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0216 11:07:37.036709 6594 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0216 11:07:37.036760 6594 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-gfr2q_openshift-ovn-kubernetes(3f545ae8-1b14-4abd-b4ea-844f6ae7b54d)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47947db986c5433367c6b52570a017def2e4babf72f785111587ca86fd745795\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gfr2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:04Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.097595 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.097652 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.097664 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.097685 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.097698 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:04Z","lastTransitionTime":"2026-02-16T11:08:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.108504 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b045625-b94b-4268-b1f3-4a735fc15ace\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://26a3c0d2164b3d6a615dce1de93b44a4a85b1831d656a20e5ce11909fad6d776\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cfced29d64a36faa7fb01921de2fa10276f96c8bda9ca028442ff93523a0e3e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfced29d64a36faa7fb01921de2fa10276f96c8bda9ca028442ff93523a0e3e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:04Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.125466 4949 
status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5554e66b-3309-4de5-b983-47e39d83fd75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b0f44fbd5ac8ebc2187917178b0999e0eb7837a1c097691c9667669486f5a01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e29b88847d04a477cf98987a7df615c6de0a6d9749cf7bdac9569f10360fc5e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb35c1de6926d4a26d187408bff5edd0286e53989f9b76e8c589428b397f9f13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\
"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f2ffd2fb061548169d3abaf8402343cafbc3e5ae03a850efdbd2d58416044185\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f2ffd2fb061548169d3abaf8402343cafbc3e5ae03a850efdbd2d58416044185\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:04Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.139051 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"39ca5ab7-457c-4404-a3eb-f6acce74843b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d3e706e8ad75d6c7419dc41073b0842faf175f1e153735a12e3ee7243676253\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID
\\\":\\\"cri-o://4978ec698681f01249736b54aa5a1fba37fc94f3667b735c99bdfb97d3bd3380\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-26lss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:04Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.153234 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cs472" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75391841-4aa8-4f03-b696-23ac283642c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a92a94b385e42e72f4c48c4c2a8ddba220c077158d0f72e099e562d3b84bf70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-bql55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3201fb16e33f256fad3eed7af0783c71ccb51ca75e6734c0745fada820853b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bql55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cs472\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:04Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.165808 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6v4x7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"965b4f20-8786-4c47-8721-c348942551d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktpql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktpql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:27Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6v4x7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:04Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.180114 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:04Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.194895 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:04Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.201098 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.201154 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.201163 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.201202 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.201218 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:04Z","lastTransitionTime":"2026-02-16T11:08:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.211695 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ff1c788-4b66-48e9-8178-006f231d264c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87041b3d12c1149f97ab42c0edec7c62aaee2e50309ad8c236b1842a6164bf79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vjrxd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:04Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.221784 4949 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-image-registry/node-ca-kvrsd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e1092c7-896f-4334-b157-ac007cdb0d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a5d6a2719dc30352250a07c1028c552b8f73c26d421b869f2e8ddc74fb9dcd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8v7hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:15Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kvrsd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:04Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.235107 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:08:04 crc kubenswrapper[4949]: E0216 11:08:04.235278 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.235477 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:08:04 crc kubenswrapper[4949]: E0216 11:08:04.235529 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.235630 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:08:04 crc kubenswrapper[4949]: E0216 11:08:04.235671 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.237021 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"abcc0a5c-3dfa-48fe-9df1-4f941d9d811c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afad26039cce493d107df9286cf3268dfc5f76d20b86bb34a36ef7742b8419bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68519f3c9bfc45ad28f92e2cf0c28a9413821784aafd91ab65a311259ed6ecf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\
\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9f247fc563e96b2d5c1e866afe8eef00f61520018ad001e6b02cffac286d3ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2296e3c50624ffcfe10eb50ae71f715af1868bfa777c60068d3ef55a2544af00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:04Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.249527 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1997fe9cefd6bfcd634b77dad76d5d7c814318127f2be93b489d7866808c57ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:04Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.260049 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:04Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.270251 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kxn9z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc3f701c-2094-4818-871c-547fc5636a55\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://993e593b3f31361b71cff339c39e1bc66b6fbbeb8a1d125976254ec852dcd919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rld7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kxn9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:04Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.283903 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jsmls" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e42a398-f83a-4463-9ab7-4e093e80d744\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e24db1cec1807881a31cfb6501695acefa892c247aa78d342eb43cb6a9ed32ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e3e0579c66283751490a28d6f93146aa4b7a000219c17747fe2041a778fe2ff\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-16T11:08:02Z\\\",\\\"message\\\":\\\"2026-02-16T11:07:17+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_7dce261c-32ce-4859-b0c0-e6188b149e57\\\\n2026-02-16T11:07:17+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_7dce261c-32ce-4859-b0c0-e6188b149e57 to /host/opt/cni/bin/\\\\n2026-02-16T11:07:17Z [verbose] multus-daemon started\\\\n2026-02-16T11:07:17Z [verbose] Readiness Indicator file check\\\\n2026-02-16T11:08:02Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:08:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7q7gk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jsmls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:04Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.302809 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aada2690-8d5f-4854-bc83-59906010e8ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4e6265ce5aefd48ee356fb002a61e38aed11e42968db8945ffeaba1d7ee3c9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-16T11:07:11Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0216 11:07:05.819935 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0216 11:07:05.820709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2063297113/tls.crt::/tmp/serving-cert-2063297113/tls.key\\\\\\\"\\\\nI0216 11:07:11.242838 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0216 11:07:11.246739 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0216 11:07:11.246759 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0216 11:07:11.246780 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0216 11:07:11.246785 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0216 11:07:11.257100 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0216 11:07:11.257140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0216 11:07:11.257148 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0216 11:07:11.257148 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0216 11:07:11.257158 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0216 11:07:11.257195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0216 11:07:11.257203 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0216 11:07:11.257208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0216 11:07:11.259242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:04Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.304489 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.304521 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.304529 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.304543 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.304553 4949 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:04Z","lastTransitionTime":"2026-02-16T11:08:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.346024 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-10 17:13:26.341375018 +0000 UTC Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.407563 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.407590 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.407599 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.407613 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.407622 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:04Z","lastTransitionTime":"2026-02-16T11:08:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.510003 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.510057 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.510070 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.510086 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.510098 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:04Z","lastTransitionTime":"2026-02-16T11:08:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.612614 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.612638 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.612647 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.612660 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.612670 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:04Z","lastTransitionTime":"2026-02-16T11:08:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.715415 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.715458 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.715467 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.715482 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.715494 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:04Z","lastTransitionTime":"2026-02-16T11:08:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.818306 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.818375 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.818388 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.818407 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.818418 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:04Z","lastTransitionTime":"2026-02-16T11:08:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.921690 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.921782 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.921793 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.921812 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:04 crc kubenswrapper[4949]: I0216 11:08:04.921825 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:04Z","lastTransitionTime":"2026-02-16T11:08:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.024479 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.024532 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.024541 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.024554 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.024563 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:05Z","lastTransitionTime":"2026-02-16T11:08:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.126451 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.126510 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.126523 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.126539 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.126550 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:05Z","lastTransitionTime":"2026-02-16T11:08:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.229755 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.229804 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.229843 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.229875 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.229895 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:05Z","lastTransitionTime":"2026-02-16T11:08:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.234312 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:08:05 crc kubenswrapper[4949]: E0216 11:08:05.234908 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6" Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.235445 4949 scope.go:117] "RemoveContainer" containerID="522f14e08f7f310ebb174df27f8bc434d591ff3579ba4c5f1d569497cdef4a11" Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.334471 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.334582 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.334614 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.334654 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.334689 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:05Z","lastTransitionTime":"2026-02-16T11:08:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.346511 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-13 09:16:47.080542255 +0000 UTC Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.437383 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.437412 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.437420 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.437433 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.437442 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:05Z","lastTransitionTime":"2026-02-16T11:08:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.540532 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.540579 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.540591 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.540607 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.540618 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:05Z","lastTransitionTime":"2026-02-16T11:08:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.643554 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.643589 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.643598 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.643611 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.643620 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:05Z","lastTransitionTime":"2026-02-16T11:08:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.751776 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.751814 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.751823 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.751840 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.751849 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:05Z","lastTransitionTime":"2026-02-16T11:08:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.854132 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.854195 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.854207 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.854222 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.854233 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:05Z","lastTransitionTime":"2026-02-16T11:08:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.956704 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.956742 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.956753 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.956770 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:05 crc kubenswrapper[4949]: I0216 11:08:05.956783 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:05Z","lastTransitionTime":"2026-02-16T11:08:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.033796 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gfr2q_3f545ae8-1b14-4abd-b4ea-844f6ae7b54d/ovnkube-controller/2.log" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.036752 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" event={"ID":"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d","Type":"ContainerStarted","Data":"1bd5bfd89e4acde3a8bfaf0f75ea8b729c29cb7eb095d232605689ae81cca223"} Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.037851 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.057345 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:06Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.059469 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.059507 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.059517 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.059536 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.059547 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:06Z","lastTransitionTime":"2026-02-16T11:08:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.070124 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kxn9z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc3f701c-2094-4818-871c-547fc5636a55\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://993e593b3f31361b71cff339c39e1bc66b6fbbeb8a1d125976254ec852dcd919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rld7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kxn9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:06Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.086158 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jsmls" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e42a398-f83a-4463-9ab7-4e093e80d744\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e24db1cec1807881a31cfb6501695acefa892c247aa78d342eb43cb6a9ed32ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e3e0579c66283751490a28d6f93146aa4b7a000219c17747fe2041a778fe2ff\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-16T11:08:02Z\\\",\\\"message\\\":\\\"2026-02-16T11:07:17+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_7dce261c-32ce-4859-b0c0-e6188b149e57\\\\n2026-02-16T11:07:17+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_7dce261c-32ce-4859-b0c0-e6188b149e57 to /host/opt/cni/bin/\\\\n2026-02-16T11:07:17Z [verbose] multus-daemon started\\\\n2026-02-16T11:07:17Z [verbose] Readiness Indicator file check\\\\n2026-02-16T11:08:02Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:08:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7q7gk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jsmls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:06Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.102213 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aada2690-8d5f-4854-bc83-59906010e8ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4e6265ce5aefd48ee356fb002a61e38aed11e42968db8945ffeaba1d7ee3c9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-16T11:07:11Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0216 11:07:05.819935 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0216 11:07:05.820709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2063297113/tls.crt::/tmp/serving-cert-2063297113/tls.key\\\\\\\"\\\\nI0216 11:07:11.242838 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0216 11:07:11.246739 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0216 11:07:11.246759 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0216 11:07:11.246780 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0216 11:07:11.246785 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0216 11:07:11.257100 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0216 11:07:11.257140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0216 11:07:11.257148 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0216 11:07:11.257148 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0216 11:07:11.257158 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0216 11:07:11.257195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0216 11:07:11.257203 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0216 11:07:11.257208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0216 11:07:11.259242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:06Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.116248 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"abcc0a5c-3dfa-48fe-9df1-4f941d9d811c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afad26039cce493d107df9286cf3268dfc5f76d20b86bb34a36ef7742b8419bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68519f3c9bfc45ad28f92e2cf0c28a9413821784aafd91ab65a311259ed6ecf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9f247fc563e96b2d5c1e866afe8eef00f61520018ad001e6b02cffac286d3ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2296e3c50624ffcfe10eb50ae71f715af1868bfa777c60068d3ef55a2544af00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:06Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.131208 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1997fe9cefd6bfcd634b77dad76d5d7c814318127f2be93b489d7866808c57ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:06Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.149435 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c16c3ec1c264807a71bba82352d13478aabcbae9eee409569b1de207fd89131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6b376f8bceb83918f978008daff9cea37a309102416bb94a209bf286a7fe3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-acces
s-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e903800152f2a61249dd315080aec7aff0fb0f96a2d687f4903ee211ac061080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b36100d71aa9c4c492c12006bd25f19aa6f8e8b39048b6e895acbadf3a3d5b5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6614345cd2b9b48d07439200086ab97d851abf34d3fea2b29a5073f224184ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c30142bc1aa2477b0821702607a40da4bd125f14ce8bdd4d6a555481f4be477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha
256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd5bfd89e4acde3a8bfaf0f75ea8b729c29cb7eb095d232605689ae81cca223\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://522f14e08f7f310ebb174df27f8bc434d591ff3579ba4c5f1d569497cdef4a11\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-16T11:07:37Z\\\",\\\"message\\\":\\\"07:37.036667 6594 default_network_controller.go:776] Recording success event on pod openshift-image-registry/node-ca-kvrsd\\\\nI0216 11:07:37.036668 6594 factory.go:656] Stopping watch factory\\\\nI0216 11:07:37.036649 6594 obj_retry.go:303] Retry object setup: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0216 11:07:37.036677 6594 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0216 11:07:37.036680 6594 ovnkube.go:599] Stopped ovnkube\\\\nI0216 11:07:37.036681 6594 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nI0216 11:07:37.036687 6594 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI0216 11:07:37.036691 6594 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0216 11:07:37.036698 6594 obj_retry.go:420] Function iterateRetryResources for *v1.Pod ended (in 165.675µs)\\\\nI0216 11:07:37.036700 6594 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0216 11:07:37.036709 6594 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0216 11:07:37.036760 6594 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:08:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47947db986c5433367c6b52570a017def2e4babf72f785111587ca86fd745795\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\
"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gfr2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:06Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.162328 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.162775 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.162888 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.163036 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.163155 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:06Z","lastTransitionTime":"2026-02-16T11:08:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.163628 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b045625-b94b-4268-b1f3-4a735fc15ace\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://26a3c0d2164b3d6a615dce1de93b44a4a85b1831d656a20e5ce11909fad6d776\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cfced29d64a36faa7fb01921de2fa10276f96c8bda9ca028442ff93523a0e3e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfced29d64a36faa7fb01921de2fa10276f96c8bda9ca028442ff93523a0e3e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:06Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.174513 4949 
status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5554e66b-3309-4de5-b983-47e39d83fd75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b0f44fbd5ac8ebc2187917178b0999e0eb7837a1c097691c9667669486f5a01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e29b88847d04a477cf98987a7df615c6de0a6d9749cf7bdac9569f10360fc5e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb35c1de6926d4a26d187408bff5edd0286e53989f9b76e8c589428b397f9f13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\
"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f2ffd2fb061548169d3abaf8402343cafbc3e5ae03a850efdbd2d58416044185\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f2ffd2fb061548169d3abaf8402343cafbc3e5ae03a850efdbd2d58416044185\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:06Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.185694 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://317e85af8628e3ae9d104c6faacfd4ce6b66adc7d3f0e426433f60118fffa894\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://43a9246b1c43b2bfb063c1d8aded507f620472bb98ffc99e91115e1189478807\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-
dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:06Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.197846 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eff651eda112458195ec8e6bc6ecdf362f44a44f7f8be136f631153251f278c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:06Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.209911 4949 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/network-metrics-daemon-6v4x7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"965b4f20-8786-4c47-8721-c348942551d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktpql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktpql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:27Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6v4x7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:06Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.224733 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:06Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.234650 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.234705 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:08:06 crc kubenswrapper[4949]: E0216 11:08:06.234798 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:08:06 crc kubenswrapper[4949]: E0216 11:08:06.234935 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.234679 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:08:06 crc kubenswrapper[4949]: E0216 11:08:06.235557 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.242218 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:06Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.254081 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"39ca5ab7-457c-4404-a3eb-f6acce74843b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d3e706e8ad75d6c7419dc41073b0842faf175f1e153735a12e3ee7243676253\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4978ec698681f01249736b54aa5a1fba37fc94f3667b735c99bdfb97d3bd3380\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-26lss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:06Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.264625 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cs472" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75391841-4aa8-4f03-b696-23ac283642c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a92a94b385e42e72f4c48c4c2a8ddba220c077158d0f72e099e562d3b84bf70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bql55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3201fb16e33f256fad3eed7af0783c71ccb51ca75e6734c0745fada820853b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d77325
7453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bql55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cs472\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:06Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.265595 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.265626 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.265635 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.265649 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.265658 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:06Z","lastTransitionTime":"2026-02-16T11:08:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.280091 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ff1c788-4b66-48e9-8178-006f231d264c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87041b3d12c1149f97ab42c0edec7c62aaee2e50309ad8c236b1842a6164bf79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vjrxd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:06Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.287712 4949 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-image-registry/node-ca-kvrsd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e1092c7-896f-4334-b157-ac007cdb0d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a5d6a2719dc30352250a07c1028c552b8f73c26d421b869f2e8ddc74fb9dcd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8v7hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:15Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kvrsd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:06Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.347498 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-24 09:08:11.163615722 +0000 UTC Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.368195 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.368425 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.368596 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.368738 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:06 crc 
kubenswrapper[4949]: I0216 11:08:06.368883 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:06Z","lastTransitionTime":"2026-02-16T11:08:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.471717 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.471796 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.471813 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.471842 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.471862 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:06Z","lastTransitionTime":"2026-02-16T11:08:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.574198 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.574244 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.574255 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.574271 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.574281 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:06Z","lastTransitionTime":"2026-02-16T11:08:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.676440 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.676477 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.676487 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.676503 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.676514 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:06Z","lastTransitionTime":"2026-02-16T11:08:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.779280 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.779340 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.779353 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.779371 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.779386 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:06Z","lastTransitionTime":"2026-02-16T11:08:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.881843 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.882194 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.882277 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.882355 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.882432 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:06Z","lastTransitionTime":"2026-02-16T11:08:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.989446 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.989505 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.989523 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.989547 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:06 crc kubenswrapper[4949]: I0216 11:08:06.989564 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:06Z","lastTransitionTime":"2026-02-16T11:08:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.042394 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gfr2q_3f545ae8-1b14-4abd-b4ea-844f6ae7b54d/ovnkube-controller/3.log" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.043002 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gfr2q_3f545ae8-1b14-4abd-b4ea-844f6ae7b54d/ovnkube-controller/2.log" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.046297 4949 generic.go:334] "Generic (PLEG): container finished" podID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerID="1bd5bfd89e4acde3a8bfaf0f75ea8b729c29cb7eb095d232605689ae81cca223" exitCode=1 Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.046362 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" event={"ID":"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d","Type":"ContainerDied","Data":"1bd5bfd89e4acde3a8bfaf0f75ea8b729c29cb7eb095d232605689ae81cca223"} Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.046421 4949 scope.go:117] "RemoveContainer" containerID="522f14e08f7f310ebb174df27f8bc434d591ff3579ba4c5f1d569497cdef4a11" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.047641 4949 scope.go:117] "RemoveContainer" containerID="1bd5bfd89e4acde3a8bfaf0f75ea8b729c29cb7eb095d232605689ae81cca223" Feb 16 11:08:07 crc kubenswrapper[4949]: E0216 11:08:07.047994 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-gfr2q_openshift-ovn-kubernetes(3f545ae8-1b14-4abd-b4ea-844f6ae7b54d)\"" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.058842 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kxn9z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc3f701c-2094-4818-871c-547fc5636a55\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://993e593b3f31361b71cff339c39e1bc66b6fbbeb8a1d125976254ec852dcd919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rld7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kxn9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:07Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.074381 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jsmls" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e42a398-f83a-4463-9ab7-4e093e80d744\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e24db1cec1807881a31cfb6501695acefa892c247aa78d342eb43cb6a9ed32ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e3e0579c66283751490a28d6f93146aa4b7a000219c17747fe2041a778fe2ff\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-16T11:08:02Z\\\",\\\"message\\\":\\\"2026-02-16T11:07:17+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_7dce261c-32ce-4859-b0c0-e6188b149e57\\\\n2026-02-16T11:07:17+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_7dce261c-32ce-4859-b0c0-e6188b149e57 to /host/opt/cni/bin/\\\\n2026-02-16T11:07:17Z [verbose] multus-daemon started\\\\n2026-02-16T11:07:17Z [verbose] Readiness Indicator file check\\\\n2026-02-16T11:08:02Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:08:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7q7gk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jsmls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:07Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.090898 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aada2690-8d5f-4854-bc83-59906010e8ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4e6265ce5aefd48ee356fb002a61e38aed11e42968db8945ffeaba1d7ee3c9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-16T11:07:11Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0216 11:07:05.819935 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0216 11:07:05.820709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2063297113/tls.crt::/tmp/serving-cert-2063297113/tls.key\\\\\\\"\\\\nI0216 11:07:11.242838 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0216 11:07:11.246739 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0216 11:07:11.246759 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0216 11:07:11.246780 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0216 11:07:11.246785 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0216 11:07:11.257100 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0216 11:07:11.257140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0216 11:07:11.257148 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0216 11:07:11.257148 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0216 11:07:11.257158 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0216 11:07:11.257195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0216 11:07:11.257203 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0216 11:07:11.257208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0216 11:07:11.259242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:07Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.092359 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.092410 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.092429 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.092455 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.092474 4949 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:07Z","lastTransitionTime":"2026-02-16T11:08:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.112424 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"abcc0a5c-3dfa-48fe-9df1-4f941d9d811c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afad26039cce493d107df9286cf3268dfc5f76d20b86bb34a36ef7742b8419bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68519f3c9bfc45ad28f92e2cf0c28a9413821784aafd91ab65a311259ed6ecf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9f247fc563e96b2d5c1e866afe8eef00f61520018ad001e6b02cffac286d3ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2296e3c50624ffcfe10eb50ae71f715af1868bfa777c60068d3ef55a2544af00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:07Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.136939 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1997fe9cefd6bfcd634b77dad76d5d7c814318127f2be93b489d7866808c57ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:07Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.152265 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:07Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.165654 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b045625-b94b-4268-b1f3-4a735fc15ace\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://26a3c0d2164b3d6a615dce1de93b44a4a85b1831d656a20e5ce11909fad6d776\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cfced29d64a36faa7fb01921de2fa10276f96c8bda9ca028442ff93523a0e3e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"rest
artCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfced29d64a36faa7fb01921de2fa10276f96c8bda9ca028442ff93523a0e3e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:07Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.178936 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5554e66b-3309-4de5-b983-47e39d83fd75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b0f44fbd5ac8ebc2187917178b0999e0eb7837a1c097691c9667669486f5a01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e29b88847d04a477cf98987a7df615c6de0a6d9749cf7bdac9569f10360fc5e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-reso
urces\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb35c1de6926d4a26d187408bff5edd0286e53989f9b76e8c589428b397f9f13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f2ffd2fb061548169d3abaf8402343cafbc3e5ae03a850efdbd2d58416044185\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f2ffd2fb061548169d3abaf8402343cafbc3e5ae03a850efdbd2d58416044185\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:07Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.195910 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.196247 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://317e85af8628e3ae9d104c6faacfd4ce6b66adc7d3f0e426433f60118fffa894\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://43a9246b1c43b2bfb063c1d8aded507f620472bb98ffc99e91115e1189478807\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:07Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.195961 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.196460 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.196484 4949 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeNotReady" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.196820 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:07Z","lastTransitionTime":"2026-02-16T11:08:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.212628 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eff651eda112458195ec8e6bc6ecdf362f44a44f7f8be136f631153251f278c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:07Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.234699 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:08:07 crc kubenswrapper[4949]: E0216 11:08:07.234867 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.251651 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c16c3ec1c264807a71bba82352d13478aabcbae9eee409569b1de207fd89131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6b376f8bceb83918f978008daff9cea37a309102416bb94a209bf286a7fe3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"
recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e903800152f2a61249dd315080aec7aff0fb0f96a2d687f4903ee211ac061080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b36100d71aa9c4c492c12006bd25f19aa6f8e8b39048b6e895acbadf3a3d5b5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6614345cd2b9b48d07439200086ab97d851abf34d3fea2b29a5073f224184ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c30142bc1aa2477b0821702607a40da4bd125f14ce8bdd4d6a555481f4be477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257
453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd5bfd89e4acde3a8bfaf0f75ea8b729c29cb7eb095d232605689ae81cca223\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://522f14e08f7f310ebb174df27f8bc434d591ff3579ba4c5f1d569497cdef4a11\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-16T11:07:37Z\\\",\\\"message\\\":\\\"07:37.036667 6594 default_network_controller.go:776] Recording success event on pod openshift-image-registry/node-ca-kvrsd\\\\nI0216 11:07:37.036668 6594 factory.go:656] Stopping watch factory\\\\nI0216 11:07:37.036649 6594 obj_retry.go:303] Retry object setup: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0216 11:07:37.036677 6594 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0216 11:07:37.036680 6594 ovnkube.go:599] Stopped ovnkube\\\\nI0216 11:07:37.036681 6594 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nI0216 11:07:37.036687 6594 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI0216 11:07:37.036691 6594 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0216 11:07:37.036698 6594 obj_retry.go:420] Function iterateRetryResources for *v1.Pod ended (in 165.675µs)\\\\nI0216 11:07:37.036700 6594 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0216 11:07:37.036709 6594 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0216 11:07:37.036760 6594 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bd5bfd89e4acde3a8bfaf0f75ea8b729c29cb7eb095d232605689ae81cca223\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-16T11:08:06Z\\\",\\\"message\\\":\\\"/kube-apiserver-crc\\\\nI0216 11:08:06.070353 7000 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI0216 11:08:06.070408 7000 obj_retry.go:365] Adding new object: *v1.Pod openshift-machine-config-operator/machine-config-daemon-26lss\\\\nI0216 11:08:06.070423 7000 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI0216 11:08:06.070445 7000 ovn.go:134] Ensuring zone local for Pod openshift-network-diagnostics/network-check-target-xd92c in node crc\\\\nI0216 11:08:06.070455 7000 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf\\\\nF0216 11:08:06.070472 7000 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post 
\\\\\\\"https://127\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:08:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47947db986c5433367c6b52570a017def2e4babf72f785111587ca86fd745795\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:17
4f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gfr2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:07Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.266716 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:07Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.279193 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:07Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.291289 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"39ca5ab7-457c-4404-a3eb-f6acce74843b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d3e706e8ad75d6c7419dc41073b0842faf175f1e153735a12e3ee7243676253\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4978ec698681f01249736b54aa5a1fba37fc94f3667b735c99bdfb97d3bd3380\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-26lss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:07Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.299559 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.299621 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.299632 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.299645 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.299654 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:07Z","lastTransitionTime":"2026-02-16T11:08:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.305312 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cs472" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75391841-4aa8-4f03-b696-23ac283642c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a92a94b385e42e72f4c48c4c2a8ddba220c077158d0f72e099e562d3b84bf70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bql55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3201fb16e33f256fad3eed7af0783c71ccb51ca75e6734c0745fada820853b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bql55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cs472\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:07Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.315861 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6v4x7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"965b4f20-8786-4c47-8721-c348942551d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktpql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktpql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:27Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6v4x7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:07Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:07 crc 
kubenswrapper[4949]: I0216 11:08:07.332232 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ff1c788-4b66-48e9-8178-006f231d264c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87041b3d12c1149f97ab42c0edec7c62aaee2e50309ad8c236b1842a6164bf79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v
4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"
cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vjrxd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:07Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.342295 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kvrsd" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e1092c7-896f-4334-b157-ac007cdb0d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a5d6a2719dc30352250a07c1028c552b8f73c26d421b869f2e8ddc74fb9dcd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8v7hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:15Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kvrsd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:07Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.348371 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-08 12:58:18.634939314 +0000 UTC Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.402372 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.402423 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.402441 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.402464 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.402481 4949 setters.go:603] 
"Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:07Z","lastTransitionTime":"2026-02-16T11:08:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.504993 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.505055 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.505074 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.505106 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.505130 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:07Z","lastTransitionTime":"2026-02-16T11:08:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.607584 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.607655 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.607665 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.607681 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.607691 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:07Z","lastTransitionTime":"2026-02-16T11:08:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.711702 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.711796 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.711824 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.711857 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.711879 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:07Z","lastTransitionTime":"2026-02-16T11:08:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.815281 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.815339 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.815358 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.815382 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.815399 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:07Z","lastTransitionTime":"2026-02-16T11:08:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.918367 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.918421 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.918460 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.918480 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:07 crc kubenswrapper[4949]: I0216 11:08:07.918492 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:07Z","lastTransitionTime":"2026-02-16T11:08:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.021059 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.021115 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.021131 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.021153 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.021190 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:08Z","lastTransitionTime":"2026-02-16T11:08:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.054489 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gfr2q_3f545ae8-1b14-4abd-b4ea-844f6ae7b54d/ovnkube-controller/3.log" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.058887 4949 scope.go:117] "RemoveContainer" containerID="1bd5bfd89e4acde3a8bfaf0f75ea8b729c29cb7eb095d232605689ae81cca223" Feb 16 11:08:08 crc kubenswrapper[4949]: E0216 11:08:08.059113 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-gfr2q_openshift-ovn-kubernetes(3f545ae8-1b14-4abd-b4ea-844f6ae7b54d)\"" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.071922 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:08Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.085798 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:08Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.097750 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"39ca5ab7-457c-4404-a3eb-f6acce74843b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d3e706e8ad75d6c7419dc41073b0842faf175f1e153735a12e3ee7243676253\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4978ec698681f01249736b54aa5a1fba37fc94f3667b735c99bdfb97d3bd3380\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-26lss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:08Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.117193 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cs472" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75391841-4aa8-4f03-b696-23ac283642c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a92a94b385e42e72f4c48c4c2a8ddba220c077158d0f72e099e562d3b84bf70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bql55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3201fb16e33f256fad3eed7af0783c71ccb51ca75e6734c0745fada820853b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d77325
7453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bql55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cs472\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:08Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.127272 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.127344 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.127363 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.127392 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.127412 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:08Z","lastTransitionTime":"2026-02-16T11:08:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.129961 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6v4x7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"965b4f20-8786-4c47-8721-c348942551d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktpql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktpql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:27Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6v4x7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:08Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.145917 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ff1c788-4b66-48e9-8178-006f231d264c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87041b3d12c1149f97ab42c0edec7c62aaee2e50309ad8c236b1842a6164bf79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vjrxd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:08Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.158933 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kvrsd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e1092c7-896f-4334-b157-ac007cdb0d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a5d6a2719dc30352250a07c1028c552b8f73c26d421b869f2e8ddc74fb9dcd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8v7hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:15Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kvrsd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:08Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.176217 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aada2690-8d5f-4854-bc83-59906010e8ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4e6265ce5aefd48ee356fb002a61e38aed11e42968db8945ffeaba1d7ee3c9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-16T11:07:11Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0216 11:07:05.819935 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0216 11:07:05.820709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2063297113/tls.crt::/tmp/serving-cert-2063297113/tls.key\\\\\\\"\\\\nI0216 11:07:11.242838 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0216 11:07:11.246739 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0216 11:07:11.246759 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0216 11:07:11.246780 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0216 11:07:11.246785 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0216 11:07:11.257100 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0216 11:07:11.257140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0216 11:07:11.257148 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0216 11:07:11.257148 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0216 11:07:11.257158 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0216 11:07:11.257195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0216 11:07:11.257203 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0216 11:07:11.257208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0216 11:07:11.259242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:08Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.190366 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"abcc0a5c-3dfa-48fe-9df1-4f941d9d811c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afad26039cce493d107df9286cf3268dfc5f76d20b86bb34a36ef7742b8419bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68519f3c9bfc45ad28f92e2cf0c28a9413821784aafd91ab65a311259ed6ecf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9f247fc563e96b2d5c1e866afe8eef00f61520018ad001e6b02cffac286d3ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2296e3c50624ffcfe10eb50ae71f715af1868bfa777c60068d3ef55a2544af00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:08Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.205642 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1997fe9cefd6bfcd634b77dad76d5d7c814318127f2be93b489d7866808c57ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:08Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.219093 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:08Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.229890 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.229965 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.229975 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.229995 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.230007 4949 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:08Z","lastTransitionTime":"2026-02-16T11:08:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.234361 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.234392 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.234455 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:08:08 crc kubenswrapper[4949]: E0216 11:08:08.234501 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:08:08 crc kubenswrapper[4949]: E0216 11:08:08.234701 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:08:08 crc kubenswrapper[4949]: E0216 11:08:08.235010 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.235030 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kxn9z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc3f701c-2094-4818-871c-547fc5636a55\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://993e593b3f31361b71cff339c39e1bc66b6fbbeb8a1d125976254ec852dcd919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rld7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kxn9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:08Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.266589 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jsmls" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e42a398-f83a-4463-9ab7-4e093e80d744\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e24db1cec1807881a31cfb6501695acefa892c247aa78d342eb43cb6a9ed32ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e3e0579c66283751490a28d6f93146aa4b7a000219c17747fe2041a778fe2ff\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-16T11:08:02Z\\\",\\\"message\\\":\\\"2026-02-16T11:07:17+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_7dce261c-32ce-4859-b0c0-e6188b149e57\\\\n2026-02-16T11:07:17+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_7dce261c-32ce-4859-b0c0-e6188b149e57 to /host/opt/cni/bin/\\\\n2026-02-16T11:07:17Z [verbose] multus-daemon started\\\\n2026-02-16T11:07:17Z [verbose] Readiness Indicator file check\\\\n2026-02-16T11:08:02Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:08:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7q7gk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jsmls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:08Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.291502 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b045625-b94b-4268-b1f3-4a735fc15ace\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://26a3c0d2164b3d6a615dce1de93b44a4a85b1831d656a20e5ce11909fad6d776\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cfced29d64a36faa7fb01921de2fa10276f96c8bda9ca028442ff93523a0e3e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfced29d64a36faa7fb01921de2fa10276f96c8bda9ca028442ff93523a0e3e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:08Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.314409 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5554e66b-3309-4de5-b983-47e39d83fd75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b0f44fbd5ac8ebc2187917178b0999e0eb7837a1c097691c9667669486f5a01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e29b88847d04a477cf98987a7df615c6de0a6d9749cf7bdac9569f10360fc5e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb35c1de6926d4a26d187408bff5edd0286e53989f9b76e8c589428b397f9f13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f2ffd2fb061548169d3abaf8402343cafbc3e5ae03a850efdbd2d58416044185\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f2ffd2fb061548169d3abaf8402343cafbc3e5ae03a850efdbd2d58416044185\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:08Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.328824 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://317e85af8628e3ae9d104c6faacfd4ce6b66adc7d3f0e426433f60118fffa894\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://43a9246b1c43b2bfb063c1d8aded507f620472bb98ffc99e91115e1189478807\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:08Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.332796 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.332908 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.332976 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.333058 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.333121 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:08Z","lastTransitionTime":"2026-02-16T11:08:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.341845 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eff651eda112458195ec8e6bc6ecdf362f44a44f7f8be136f631153251f278c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:08Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.348818 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-12 12:48:43.245495349 +0000 UTC Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.362278 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with 
unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c16c3ec1c264807a71bba82352d13478aabcbae9eee409569b1de207fd89131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6b376f8bceb83918f978008daff9cea37a309102416bb94a209bf286a7fe3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e903800152f2a61249dd315080aec7aff0fb0f96a2d687f4903ee211ac061080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceacco
unt\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b36100d71aa9c4c492c12006bd25f19aa6f8e8b39048b6e895acbadf3a3d5b5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6614345cd2b9b48d07439200086ab97d851abf34d3fea2b29a5073f224184ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c30142bc1aa2477b0821702607a40da4bd125f14ce8bdd4d6a555481f4be477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\
\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd5bfd89e4acde3a8bfaf0f75ea8b729c29cb7eb095d232605689ae81cca223\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bd5bfd89e4acde3a8bfaf0f75ea8b729c29cb7eb095d232605689ae81cca223\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-16T11:08:06Z\\\",\\\"message\\\":\\\"/kube-apiserver-crc\\\\nI0216 11:08:06.070353 7000 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI0216 11:08:06.070408 7000 obj_retry.go:365] Adding new object: *v1.Pod openshift-machine-config-operator/machine-config-daemon-26lss\\\\nI0216 11:08:06.070423 7000 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI0216 11:08:06.070445 7000 ovn.go:134] Ensuring zone local for Pod openshift-network-diagnostics/network-check-target-xd92c in node crc\\\\nI0216 11:08:06.070455 7000 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf\\\\nF0216 11:08:06.070472 7000 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:08:05Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-gfr2q_openshift-ovn-kubernetes(3f545ae8-1b14-4abd-b4ea-844f6ae7b54d)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47947db986c5433367c6b52570a017def2e4babf72f785111587ca86fd745795\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gfr2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:08Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.436273 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.436601 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.436760 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.436920 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.437084 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:08Z","lastTransitionTime":"2026-02-16T11:08:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.540525 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.540592 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.540612 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.540638 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.540660 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:08Z","lastTransitionTime":"2026-02-16T11:08:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.643733 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.643817 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.643829 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.643845 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.643857 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:08Z","lastTransitionTime":"2026-02-16T11:08:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.746588 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.746642 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.746654 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.746671 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.746683 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:08Z","lastTransitionTime":"2026-02-16T11:08:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.848637 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.848893 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.848983 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.849095 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.849201 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:08Z","lastTransitionTime":"2026-02-16T11:08:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.952318 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.952381 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.952392 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.952409 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:08 crc kubenswrapper[4949]: I0216 11:08:08.952422 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:08Z","lastTransitionTime":"2026-02-16T11:08:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.003738 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.003798 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.003813 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.003853 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.003866 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:09Z","lastTransitionTime":"2026-02-16T11:08:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:09 crc kubenswrapper[4949]: E0216 11:08:09.018630 4949 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:08:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:08:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:08:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:08:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc77a723-71f9-4f4a-b80e-2feb50c63f04\\\",\\\"systemUUID\\\":\\\"fcf7eef6-e236-4c8e-bd9c-41b70a7621ed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:09Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.022575 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.022617 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.022631 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.022649 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.022661 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:09Z","lastTransitionTime":"2026-02-16T11:08:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:09 crc kubenswrapper[4949]: E0216 11:08:09.038157 4949 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:08:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:08:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:08:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:08:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc77a723-71f9-4f4a-b80e-2feb50c63f04\\\",\\\"systemUUID\\\":\\\"fcf7eef6-e236-4c8e-bd9c-41b70a7621ed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:09Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.042458 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.042493 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.042501 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.042515 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.042524 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:09Z","lastTransitionTime":"2026-02-16T11:08:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:09 crc kubenswrapper[4949]: E0216 11:08:09.055074 4949 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:08:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:08:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:08:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:08:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc77a723-71f9-4f4a-b80e-2feb50c63f04\\\",\\\"systemUUID\\\":\\\"fcf7eef6-e236-4c8e-bd9c-41b70a7621ed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:09Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.058584 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.058614 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.058623 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.058638 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.058650 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:09Z","lastTransitionTime":"2026-02-16T11:08:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:09 crc kubenswrapper[4949]: E0216 11:08:09.071980 4949 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:08:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:08:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:08:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:08:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc77a723-71f9-4f4a-b80e-2feb50c63f04\\\",\\\"systemUUID\\\":\\\"fcf7eef6-e236-4c8e-bd9c-41b70a7621ed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:09Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.076847 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.076891 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.076902 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.076918 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.076937 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:09Z","lastTransitionTime":"2026-02-16T11:08:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:09 crc kubenswrapper[4949]: E0216 11:08:09.091084 4949 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:08:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:08:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:08:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:08:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc77a723-71f9-4f4a-b80e-2feb50c63f04\\\",\\\"systemUUID\\\":\\\"fcf7eef6-e236-4c8e-bd9c-41b70a7621ed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:09Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:09 crc kubenswrapper[4949]: E0216 11:08:09.091366 4949 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.093265 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.093352 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.093372 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.093395 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.093413 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:09Z","lastTransitionTime":"2026-02-16T11:08:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.196568 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.196642 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.196666 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.196699 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.196722 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:09Z","lastTransitionTime":"2026-02-16T11:08:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.234753 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:08:09 crc kubenswrapper[4949]: E0216 11:08:09.234944 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.299320 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.299382 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.299393 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.299409 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.299420 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:09Z","lastTransitionTime":"2026-02-16T11:08:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.349743 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-27 05:06:09.685200273 +0000 UTC Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.413340 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.413470 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.413499 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.413527 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.413545 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:09Z","lastTransitionTime":"2026-02-16T11:08:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.516425 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.516504 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.516529 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.516560 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.516582 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:09Z","lastTransitionTime":"2026-02-16T11:08:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.619208 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.619257 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.619342 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.619362 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.619376 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:09Z","lastTransitionTime":"2026-02-16T11:08:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.721945 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.721986 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.722001 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.722016 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.722027 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:09Z","lastTransitionTime":"2026-02-16T11:08:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.824479 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.824517 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.824528 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.824544 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.824554 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:09Z","lastTransitionTime":"2026-02-16T11:08:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.926534 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.926596 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.926607 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.926622 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:09 crc kubenswrapper[4949]: I0216 11:08:09.926632 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:09Z","lastTransitionTime":"2026-02-16T11:08:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.029046 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.029092 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.029100 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.029113 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.029123 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:10Z","lastTransitionTime":"2026-02-16T11:08:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.131514 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.131554 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.131565 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.131581 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.131591 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:10Z","lastTransitionTime":"2026-02-16T11:08:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.234240 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.234289 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.234257 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.234595 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.234634 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.234695 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.234732 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.234755 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:10Z","lastTransitionTime":"2026-02-16T11:08:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:10 crc kubenswrapper[4949]: E0216 11:08:10.234668 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:08:10 crc kubenswrapper[4949]: E0216 11:08:10.234965 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:08:10 crc kubenswrapper[4949]: E0216 11:08:10.235041 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.337756 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.337799 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.337807 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.337821 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.337834 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:10Z","lastTransitionTime":"2026-02-16T11:08:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.350452 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-31 09:33:25.218719732 +0000 UTC Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.440328 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.440394 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.440407 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.440424 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.440438 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:10Z","lastTransitionTime":"2026-02-16T11:08:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.543545 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.543617 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.543630 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.543648 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.543694 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:10Z","lastTransitionTime":"2026-02-16T11:08:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.646630 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.646666 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.646675 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.646739 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.646752 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:10Z","lastTransitionTime":"2026-02-16T11:08:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.749785 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.749834 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.749843 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.749858 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.749870 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:10Z","lastTransitionTime":"2026-02-16T11:08:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.852915 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.852969 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.852980 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.853002 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.853012 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:10Z","lastTransitionTime":"2026-02-16T11:08:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.955356 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.955423 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.955435 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.955453 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:10 crc kubenswrapper[4949]: I0216 11:08:10.955466 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:10Z","lastTransitionTime":"2026-02-16T11:08:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.057875 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.057905 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.057913 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.057926 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.057937 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:11Z","lastTransitionTime":"2026-02-16T11:08:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.160725 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.160776 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.160789 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.160807 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.160820 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:11Z","lastTransitionTime":"2026-02-16T11:08:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.234502 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:08:11 crc kubenswrapper[4949]: E0216 11:08:11.234642 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.249282 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b045625-b94b-4268-b1f3-4a735fc15ace\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://26a3c0d2164b3d6a615dce1de93b44a4a85b1831d656a20e5ce11909fad6d776\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cfced29d64a36faa7fb01921de2fa10276f96c8bda9ca028442ff93523a0e3e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfced29d64a36faa7fb01921de2fa10276f96c8bda9ca028442ff93523a0e3e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kube
let\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:11Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.264216 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.264378 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.264408 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.264436 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.264460 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:11Z","lastTransitionTime":"2026-02-16T11:08:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.267716 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5554e66b-3309-4de5-b983-47e39d83fd75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b0f44fbd5ac8ebc2187917178b0999e0eb7837a1c097691c9667669486f5a01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e29b88847d04a477cf98987a7df615c6de0a6d9749cf7bdac9569f10360fc5e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb35c1de6926d4a26d187408bff5edd0286e53989f9b76e8c589428b397f9f13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f2ffd2fb061548169d3abaf8402343cafbc3e5ae03a850efdbd2d58416044185\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f2ffd2fb061548169d3abaf8402343cafbc3e5ae03a850efdbd2d58416044185\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:11Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.288154 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://317e85af8628e3ae9d104c6faacfd4ce6b66adc7d3f0e426433f60118fffa894\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://43a9246b1c43b2bfb063c1d8aded507f620472bb98ffc99e91115e1189478807\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:11Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.303826 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eff651eda112458195ec8e6bc6ecdf362f44a44f7f8be136f631153251f278c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:11Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.332080 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c16c3ec1c264807a71bba82352d13478aabcbae9eee409569b1de207fd89131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6b376f8bceb83918f978008daff9cea37a309102416bb94a209bf286a7fe3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e903800152f2a61249dd315080aec7aff0fb0f96a2d687f4903ee211ac061080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b36100d71aa9c4c492c12006bd25f19aa6f8e8b39048b6e895acbadf3a3d5b5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6614345cd2b9b48d07439200086ab97d851abf34d3fea2b29a5073f224184ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c30142bc1aa2477b0821702607a40da4bd125f14ce8bdd4d6a555481f4be477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd5bfd89e4acde3a8bfaf0f75ea8b729c29cb7eb095d232605689ae81cca223\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bd5bfd89e4acde3a8bfaf0f75ea8b729c29cb7eb095d232605689ae81cca223\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-16T11:08:06Z\\\",\\\"message\\\":\\\"/kube-apiserver-crc\\\\nI0216 11:08:06.070353 7000 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI0216 11:08:06.070408 7000 obj_retry.go:365] Adding new object: *v1.Pod openshift-machine-config-operator/machine-config-daemon-26lss\\\\nI0216 11:08:06.070423 7000 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI0216 11:08:06.070445 7000 ovn.go:134] Ensuring zone local for Pod openshift-network-diagnostics/network-check-target-xd92c in node crc\\\\nI0216 11:08:06.070455 7000 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf\\\\nF0216 11:08:06.070472 7000 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:08:05Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-gfr2q_openshift-ovn-kubernetes(3f545ae8-1b14-4abd-b4ea-844f6ae7b54d)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47947db986c5433367c6b52570a017def2e4babf72f785111587ca86fd745795\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gfr2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:11Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.347657 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:11Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.350733 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-10 05:00:17.976093195 +0000 UTC Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.362958 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:11Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.367187 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.367220 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.367232 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.367252 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.367292 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:11Z","lastTransitionTime":"2026-02-16T11:08:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.377755 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"39ca5ab7-457c-4404-a3eb-f6acce74843b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d3e706e8ad75d6c7419dc41073b0842faf175f1e153735a12e3ee7243676253\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4978ec698681f01249736b54aa5a1fba37fc94f3667b735c99bdfb97d3bd3380\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-26lss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:11Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.393648 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cs472" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75391841-4aa8-4f03-b696-23ac283642c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a92a94b385e42e72f4c48c4c2a8ddba220c077158d0f72e099e562d3b84bf70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bql55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3201fb16e33f256fad3eed7af0783c71ccb51ca75e6734c0745fada820853b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bql55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:
25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cs472\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:11Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.409063 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6v4x7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"965b4f20-8786-4c47-8721-c348942551d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktpql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktpql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:27Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6v4x7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:11Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.439317 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ff1c788-4b66-48e9-8178-006f231d264c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87041b3d12c1149f97ab42c0edec7c62aaee2e50309ad8c236b1842a6164bf79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"c
ontainerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:18Z\\\"}},\\\"volumeMo
unts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vjrxd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:11Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:11 crc kubenswrapper[4949]: 
I0216 11:08:11.451726 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kvrsd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e1092c7-896f-4334-b157-ac007cdb0d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a5d6a2719dc30352250a07c1028c552b8f73c26d421b869f2e8ddc74fb9dcd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8v7hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:15Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kvrsd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:11Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.469382 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jsmls" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e42a398-f83a-4463-9ab7-4e093e80d744\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e24db1cec1807881a31cfb6501695acefa892c247aa78d342eb43cb6a9ed32ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e3e0579c66283751490a28d6f93146aa4b7a000219c17747fe2041a778fe2ff\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-16T11:08:02Z\\\",\\\"message\\\":\\\"2026-02-16T11:07:17+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_7dce261c-32ce-4859-b0c0-e6188b149e57\\\\n2026-02-16T11:07:17+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_7dce261c-32ce-4859-b0c0-e6188b149e57 to /host/opt/cni/bin/\\\\n2026-02-16T11:07:17Z [verbose] multus-daemon started\\\\n2026-02-16T11:07:17Z [verbose] Readiness Indicator file check\\\\n2026-02-16T11:08:02Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:08:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7q7gk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jsmls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:11Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.470678 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.470711 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.470721 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.470737 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.470748 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:11Z","lastTransitionTime":"2026-02-16T11:08:11Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.481735 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aada2690-8d5f-4854-bc83-59906010e8ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-
16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4e6265ce5aefd48ee356fb002a61e38aed11e42968db8945ffeaba1d7ee3c9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-16T11:07:11Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0216 11:07:05.819935 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0216 11:07:05.820709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2063297113/tls.crt::/tmp/serving-cert-2063297113/tls.key\\\\\\\"\\\\nI0216 11:07:11.242838 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0216 11:07:11.246739 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0216 11:07:11.246759 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0216 11:07:11.246780 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0216 11:07:11.246785 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0216 11:07:11.257100 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0216 11:07:11.257140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0216 11:07:11.257148 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0216 11:07:11.257148 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0216 11:07:11.257158 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0216 11:07:11.257195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0216 11:07:11.257203 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0216 11:07:11.257208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0216 11:07:11.259242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:11Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.493159 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"abcc0a5c-3dfa-48fe-9df1-4f941d9d811c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afad26039cce493d107df9286cf3268dfc5f76d20b86bb34a36ef7742b8419bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68519f3c9bfc45ad28f92e2cf0c28a9413821784aafd91ab65a311259ed6ecf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9f247fc563e96b2d5c1e866afe8eef00f61520018ad001e6b02cffac286d3ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2296e3c50624ffcfe10eb50ae71f715af1868bfa777c60068d3ef55a2544af00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:11Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.504944 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1997fe9cefd6bfcd634b77dad76d5d7c814318127f2be93b489d7866808c57ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:11Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.515390 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:11Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.523636 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kxn9z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc3f701c-2094-4818-871c-547fc5636a55\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://993e593b3f31361b71cff339c39e1bc66b6fbbeb8a1d125976254ec852dcd919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rld7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kxn9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:11Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.572429 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.572501 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.572510 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.572545 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.572554 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:11Z","lastTransitionTime":"2026-02-16T11:08:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.675614 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.675666 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.675679 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.675696 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.675707 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:11Z","lastTransitionTime":"2026-02-16T11:08:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.778331 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.778372 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.778383 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.778401 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.778412 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:11Z","lastTransitionTime":"2026-02-16T11:08:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.880839 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.880883 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.880891 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.880905 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.880914 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:11Z","lastTransitionTime":"2026-02-16T11:08:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.983272 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.983336 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.983345 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.983359 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:11 crc kubenswrapper[4949]: I0216 11:08:11.983368 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:11Z","lastTransitionTime":"2026-02-16T11:08:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:12 crc kubenswrapper[4949]: I0216 11:08:12.086369 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:12 crc kubenswrapper[4949]: I0216 11:08:12.086415 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:12 crc kubenswrapper[4949]: I0216 11:08:12.086429 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:12 crc kubenswrapper[4949]: I0216 11:08:12.086445 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:12 crc kubenswrapper[4949]: I0216 11:08:12.086456 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:12Z","lastTransitionTime":"2026-02-16T11:08:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:12 crc kubenswrapper[4949]: I0216 11:08:12.188915 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:12 crc kubenswrapper[4949]: I0216 11:08:12.188959 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:12 crc kubenswrapper[4949]: I0216 11:08:12.188972 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:12 crc kubenswrapper[4949]: I0216 11:08:12.188989 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:12 crc kubenswrapper[4949]: I0216 11:08:12.189000 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:12Z","lastTransitionTime":"2026-02-16T11:08:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:12 crc kubenswrapper[4949]: I0216 11:08:12.234192 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:08:12 crc kubenswrapper[4949]: I0216 11:08:12.234207 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:08:12 crc kubenswrapper[4949]: E0216 11:08:12.234311 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:08:12 crc kubenswrapper[4949]: I0216 11:08:12.234365 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:08:12 crc kubenswrapper[4949]: E0216 11:08:12.234387 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:08:12 crc kubenswrapper[4949]: E0216 11:08:12.234536 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:08:12 crc kubenswrapper[4949]: I0216 11:08:12.291863 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:12 crc kubenswrapper[4949]: I0216 11:08:12.291947 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:12 crc kubenswrapper[4949]: I0216 11:08:12.291958 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:12 crc kubenswrapper[4949]: I0216 11:08:12.291975 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:12 crc kubenswrapper[4949]: I0216 11:08:12.291986 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:12Z","lastTransitionTime":"2026-02-16T11:08:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:12 crc kubenswrapper[4949]: I0216 11:08:12.351461 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-16 22:06:37.210822804 +0000 UTC Feb 16 11:08:12 crc kubenswrapper[4949]: I0216 11:08:12.394333 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:12 crc kubenswrapper[4949]: I0216 11:08:12.394377 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:12 crc kubenswrapper[4949]: I0216 11:08:12.394391 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:12 crc kubenswrapper[4949]: I0216 11:08:12.394409 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:12 crc kubenswrapper[4949]: I0216 11:08:12.394423 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:12Z","lastTransitionTime":"2026-02-16T11:08:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:12 crc kubenswrapper[4949]: I0216 11:08:12.497988 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:12 crc kubenswrapper[4949]: I0216 11:08:12.498050 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:12 crc kubenswrapper[4949]: I0216 11:08:12.498063 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:12 crc kubenswrapper[4949]: I0216 11:08:12.498084 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:12 crc kubenswrapper[4949]: I0216 11:08:12.498100 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:12Z","lastTransitionTime":"2026-02-16T11:08:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:12 crc kubenswrapper[4949]: I0216 11:08:12.601361 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:12 crc kubenswrapper[4949]: I0216 11:08:12.601405 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:12 crc kubenswrapper[4949]: I0216 11:08:12.601415 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:12 crc kubenswrapper[4949]: I0216 11:08:12.601434 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:12 crc kubenswrapper[4949]: I0216 11:08:12.601449 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:12Z","lastTransitionTime":"2026-02-16T11:08:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:12 crc kubenswrapper[4949]: I0216 11:08:12.704520 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:12 crc kubenswrapper[4949]: I0216 11:08:12.705079 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:12 crc kubenswrapper[4949]: I0216 11:08:12.705217 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:12 crc kubenswrapper[4949]: I0216 11:08:12.705340 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:12 crc kubenswrapper[4949]: I0216 11:08:12.705452 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:12Z","lastTransitionTime":"2026-02-16T11:08:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:12 crc kubenswrapper[4949]: I0216 11:08:12.808459 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:12 crc kubenswrapper[4949]: I0216 11:08:12.808514 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:12 crc kubenswrapper[4949]: I0216 11:08:12.808528 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:12 crc kubenswrapper[4949]: I0216 11:08:12.808545 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:12 crc kubenswrapper[4949]: I0216 11:08:12.808557 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:12Z","lastTransitionTime":"2026-02-16T11:08:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:12 crc kubenswrapper[4949]: I0216 11:08:12.911861 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:12 crc kubenswrapper[4949]: I0216 11:08:12.911929 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:12 crc kubenswrapper[4949]: I0216 11:08:12.911954 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:12 crc kubenswrapper[4949]: I0216 11:08:12.911986 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:12 crc kubenswrapper[4949]: I0216 11:08:12.912009 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:12Z","lastTransitionTime":"2026-02-16T11:08:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.014494 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.014583 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.014594 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.014615 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.014630 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:13Z","lastTransitionTime":"2026-02-16T11:08:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.117095 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.117163 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.117195 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.117224 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.117248 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:13Z","lastTransitionTime":"2026-02-16T11:08:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.220203 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.220715 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.220730 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.220754 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.220771 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:13Z","lastTransitionTime":"2026-02-16T11:08:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.234680 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:08:13 crc kubenswrapper[4949]: E0216 11:08:13.234831 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6" Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.323229 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.323296 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.323313 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.323339 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.323360 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:13Z","lastTransitionTime":"2026-02-16T11:08:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.352268 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-17 02:22:43.67470356 +0000 UTC Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.426448 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.426496 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.426512 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.426536 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.426556 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:13Z","lastTransitionTime":"2026-02-16T11:08:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.528645 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.528688 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.528699 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.528718 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.528730 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:13Z","lastTransitionTime":"2026-02-16T11:08:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.631842 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.631887 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.631900 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.631946 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.631961 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:13Z","lastTransitionTime":"2026-02-16T11:08:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.735469 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.735521 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.735536 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.735555 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.735571 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:13Z","lastTransitionTime":"2026-02-16T11:08:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.838168 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.838214 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.838224 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.838263 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.838273 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:13Z","lastTransitionTime":"2026-02-16T11:08:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.941811 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.941863 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.941875 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.941897 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:13 crc kubenswrapper[4949]: I0216 11:08:13.941912 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:13Z","lastTransitionTime":"2026-02-16T11:08:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.045683 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.045759 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.045783 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.045812 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.045835 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:14Z","lastTransitionTime":"2026-02-16T11:08:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.149515 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.149565 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.149575 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.149594 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.149607 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:14Z","lastTransitionTime":"2026-02-16T11:08:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.234778 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.234933 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:08:14 crc kubenswrapper[4949]: E0216 11:08:14.235003 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.235031 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:08:14 crc kubenswrapper[4949]: E0216 11:08:14.235137 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:08:14 crc kubenswrapper[4949]: E0216 11:08:14.235293 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.252216 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.252267 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.252290 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.252322 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.252347 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:14Z","lastTransitionTime":"2026-02-16T11:08:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.353363 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-23 08:19:38.880641926 +0000 UTC Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.355154 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.355247 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.355272 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.355306 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.355330 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:14Z","lastTransitionTime":"2026-02-16T11:08:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.458927 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.458986 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.459006 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.459030 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.459049 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:14Z","lastTransitionTime":"2026-02-16T11:08:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.562376 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.562429 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.562447 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.562469 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.562487 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:14Z","lastTransitionTime":"2026-02-16T11:08:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.665875 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.665940 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.665953 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.665979 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.665997 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:14Z","lastTransitionTime":"2026-02-16T11:08:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.769332 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.769386 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.769396 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.769414 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.769424 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:14Z","lastTransitionTime":"2026-02-16T11:08:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.872873 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.872947 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.872972 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.873005 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.873029 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:14Z","lastTransitionTime":"2026-02-16T11:08:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.975959 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.976023 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.976041 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.976065 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:14 crc kubenswrapper[4949]: I0216 11:08:14.976089 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:14Z","lastTransitionTime":"2026-02-16T11:08:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:15 crc kubenswrapper[4949]: I0216 11:08:15.078306 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:15 crc kubenswrapper[4949]: I0216 11:08:15.078371 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:15 crc kubenswrapper[4949]: I0216 11:08:15.078388 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:15 crc kubenswrapper[4949]: I0216 11:08:15.078411 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:15 crc kubenswrapper[4949]: I0216 11:08:15.078428 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:15Z","lastTransitionTime":"2026-02-16T11:08:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:15 crc kubenswrapper[4949]: I0216 11:08:15.180469 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:15 crc kubenswrapper[4949]: I0216 11:08:15.180535 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:15 crc kubenswrapper[4949]: I0216 11:08:15.180560 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:15 crc kubenswrapper[4949]: I0216 11:08:15.180589 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:15 crc kubenswrapper[4949]: I0216 11:08:15.180614 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:15Z","lastTransitionTime":"2026-02-16T11:08:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:15 crc kubenswrapper[4949]: I0216 11:08:15.234434 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:08:15 crc kubenswrapper[4949]: E0216 11:08:15.234688 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6" Feb 16 11:08:15 crc kubenswrapper[4949]: I0216 11:08:15.283084 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:15 crc kubenswrapper[4949]: I0216 11:08:15.283155 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:15 crc kubenswrapper[4949]: I0216 11:08:15.283227 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:15 crc kubenswrapper[4949]: I0216 11:08:15.283259 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:15 crc kubenswrapper[4949]: I0216 11:08:15.283280 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:15Z","lastTransitionTime":"2026-02-16T11:08:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:15 crc kubenswrapper[4949]: I0216 11:08:15.354281 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-04 11:51:51.314057895 +0000 UTC Feb 16 11:08:15 crc kubenswrapper[4949]: I0216 11:08:15.386845 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:15 crc kubenswrapper[4949]: I0216 11:08:15.386891 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:15 crc kubenswrapper[4949]: I0216 11:08:15.386911 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:15 crc kubenswrapper[4949]: I0216 11:08:15.386935 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:15 crc kubenswrapper[4949]: I0216 11:08:15.386953 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:15Z","lastTransitionTime":"2026-02-16T11:08:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:15 crc kubenswrapper[4949]: I0216 11:08:15.489627 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:15 crc kubenswrapper[4949]: I0216 11:08:15.489983 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:15 crc kubenswrapper[4949]: I0216 11:08:15.490233 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:15 crc kubenswrapper[4949]: I0216 11:08:15.490461 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:15 crc kubenswrapper[4949]: I0216 11:08:15.490658 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:15Z","lastTransitionTime":"2026-02-16T11:08:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:15 crc kubenswrapper[4949]: I0216 11:08:15.594063 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:15 crc kubenswrapper[4949]: I0216 11:08:15.594140 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:15 crc kubenswrapper[4949]: I0216 11:08:15.594163 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:15 crc kubenswrapper[4949]: I0216 11:08:15.594239 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:15 crc kubenswrapper[4949]: I0216 11:08:15.594261 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:15Z","lastTransitionTime":"2026-02-16T11:08:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:15 crc kubenswrapper[4949]: I0216 11:08:15.696840 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:15 crc kubenswrapper[4949]: I0216 11:08:15.696908 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:15 crc kubenswrapper[4949]: I0216 11:08:15.696931 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:15 crc kubenswrapper[4949]: I0216 11:08:15.696964 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:15 crc kubenswrapper[4949]: I0216 11:08:15.696987 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:15Z","lastTransitionTime":"2026-02-16T11:08:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:15 crc kubenswrapper[4949]: I0216 11:08:15.799938 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:15 crc kubenswrapper[4949]: I0216 11:08:15.800020 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:15 crc kubenswrapper[4949]: I0216 11:08:15.800037 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:15 crc kubenswrapper[4949]: I0216 11:08:15.800066 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:15 crc kubenswrapper[4949]: I0216 11:08:15.800083 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:15Z","lastTransitionTime":"2026-02-16T11:08:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:15 crc kubenswrapper[4949]: I0216 11:08:15.904100 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:15 crc kubenswrapper[4949]: I0216 11:08:15.904209 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:15 crc kubenswrapper[4949]: I0216 11:08:15.904227 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:15 crc kubenswrapper[4949]: I0216 11:08:15.904252 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:15 crc kubenswrapper[4949]: I0216 11:08:15.904268 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:15Z","lastTransitionTime":"2026-02-16T11:08:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.007084 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.007149 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.007195 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.007226 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.007250 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:16Z","lastTransitionTime":"2026-02-16T11:08:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.102877 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.103058 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Feb 16 11:08:16 crc kubenswrapper[4949]: E0216 11:08:16.103273 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:20.103229439 +0000 UTC m=+149.732563644 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Feb 16 11:08:16 crc kubenswrapper[4949]: E0216 11:08:16.103368 4949 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Feb 16 11:08:16 crc kubenswrapper[4949]: E0216 11:08:16.103430 4949 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Feb 16 11:08:16 crc kubenswrapper[4949]: E0216 11:08:16.103466 4949 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Feb 16 11:08:16 crc kubenswrapper[4949]: E0216 11:08:16.103492 4949 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.103129 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Feb 16 11:08:16 crc kubenswrapper[4949]: E0216 11:08:16.103580 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-02-16 11:09:20.103544418 +0000 UTC m=+149.732878653 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Feb 16 11:08:16 crc kubenswrapper[4949]: E0216 11:08:16.103625 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-16 11:09:20.1036038 +0000 UTC m=+149.732938115 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.103765 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.103972 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Feb 16 11:08:16 crc kubenswrapper[4949]: E0216 11:08:16.103989 4949 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Feb 16 11:08:16 crc kubenswrapper[4949]: E0216 11:08:16.104060 4949 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Feb 16 11:08:16 crc kubenswrapper[4949]: E0216 11:08:16.104103 4949 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Feb 16 11:08:16 crc kubenswrapper[4949]: E0216 11:08:16.104130 4949 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Feb 16 11:08:16 crc kubenswrapper[4949]: E0216 11:08:16.104149 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-16 11:09:20.104118255 +0000 UTC m=+149.733452460 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Feb 16 11:08:16 crc kubenswrapper[4949]: E0216 11:08:16.104262 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-02-16 11:09:20.104236328 +0000 UTC m=+149.733570523 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.109984 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.110034 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.110052 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.110076 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.110094 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:16Z","lastTransitionTime":"2026-02-16T11:08:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
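Every one of the mount and unmount failures above is parked with durationBeforeRetry 1m4s. 64 s is 500 ms doubled seven times, which is what a per-volume exponential backoff starting at 500 ms reaches after the eighth consecutive failure; the kubelet caps this kind of backoff at roughly two minutes. The "object ... not registered" errors mean the kubelet has not yet synced those ConfigMaps and Secrets into its local object store, so volume setup cannot proceed and keeps backing off. A Go sketch of that schedule; the 500 ms initial delay and the 2 m cap are assumed values consistent with the logged 1m4s, not values read from this log:

    // Sketch: doubling per-volume retry backoff implied by "durationBeforeRetry 1m4s".
    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	delay := 500 * time.Millisecond // assumed initial delay
    	maxDelay := 2 * time.Minute     // assumed cap
    	for failures := 1; failures <= 9; failures++ {
    		fmt.Printf("failure %d -> durationBeforeRetry %v\n", failures, delay)
    		delay *= 2
    		if delay > maxDelay {
    			delay = maxDelay
    		}
    	}
    }

Running this prints 1m4s on the eighth failure, matching the retry window logged here (11:08:16 plus 64 s is the 11:09:20 "No retries permitted until" timestamp).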
Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.212348 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.212414 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.212431 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.212453 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.212469 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:16Z","lastTransitionTime":"2026-02-16T11:08:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.234798 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.234856 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.234825 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Feb 16 11:08:16 crc kubenswrapper[4949]: E0216 11:08:16.235004 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Feb 16 11:08:16 crc kubenswrapper[4949]: E0216 11:08:16.235073 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Feb 16 11:08:16 crc kubenswrapper[4949]: E0216 11:08:16.235132 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
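Everything in this stretch of the log hangs off a single condition: there is no file in /etc/kubernetes/cni/net.d/. Until the network provider (Multus/OVN on OpenShift, going by the openshift-multus pods elsewhere in this log) writes a CNI configuration there, no pod sandbox can be created, which is why each "No sandbox for pod can be found" line is immediately followed by "Error syncing pod, skipping". An illustrative Go rendering of the presence check that keeps failing; the glob patterns are assumptions, and the real logic lives in the runtime's CNI handling, not in this form:

    // Illustrative only: the check behind "no CNI configuration file in /etc/kubernetes/cni/net.d/".
    package main

    import (
    	"fmt"
    	"os"
    	"path/filepath"
    )

    func main() {
    	dir := "/etc/kubernetes/cni/net.d" // directory named in the log
    	var found []string
    	for _, pat := range []string{"*.conf", "*.conflist", "*.json"} { // assumed extensions
    		matches, err := filepath.Glob(filepath.Join(dir, pat))
    		if err == nil {
    			found = append(found, matches...)
    		}
    	}
    	if len(found) == 0 {
    		fmt.Fprintf(os.Stderr, "NetworkReady=false: no CNI configuration file in %s\n", dir)
    		os.Exit(1)
    	}
    	fmt.Println("CNI configuration present:", found)
    }

Once a configuration file appears in that directory, NetworkReady flips to true and the pending sandbox creations retry on their own.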
Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.314699 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.314751 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.314768 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.314791 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.314811 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:16Z","lastTransitionTime":"2026-02-16T11:08:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.354679 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-07 22:36:01.950737794 +0000 UTC
Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.418366 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.418433 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.418456 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.418482 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.418508 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:16Z","lastTransitionTime":"2026-02-16T11:08:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
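The certificate_manager line above is unrelated to the network failure and is not an error. Note that the expiration stays fixed at 2026-02-24 05:53:03 while the rotation deadline differs on every occurrence (2025-11-07 here, 2026-01-15 and 2025-11-28 in later entries): the kubelet's certificate manager re-draws the deadline as a jittered random point partway through the certificate's validity each time it checks, and since every drawn deadline is already in the past, rotation is due and the line keeps reappearing. A Go sketch of such a draw; the one-year validity window and the 0.7-0.9 fraction range are assumptions inferred from the logged dates, not values read from this system:

    // Sketch of a jittered certificate-rotation deadline draw.
    package main

    import (
    	"fmt"
    	"math/rand"
    	"time"
    )

    func main() {
    	notBefore := time.Date(2025, 2, 24, 5, 53, 3, 0, time.UTC) // assumed issue time
    	notAfter := time.Date(2026, 2, 24, 5, 53, 3, 0, time.UTC)  // logged expiration
    	total := notAfter.Sub(notBefore)
    	for i := 0; i < 3; i++ {
    		frac := 0.7 + 0.2*rand.Float64() // assumed jitter range
    		fmt.Println("rotation deadline:", notBefore.Add(time.Duration(frac*float64(total))))
    	}
    }

Each run of the loop lands somewhere different in the window, which is exactly the jumping-around visible across the three certificate_manager entries in this section.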
Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.521129 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.521234 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.521257 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.521280 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.521294 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:16Z","lastTransitionTime":"2026-02-16T11:08:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.623335 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.623411 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.623433 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.623461 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.623483 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:16Z","lastTransitionTime":"2026-02-16T11:08:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.725739 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.725777 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.725790 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.725805 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.725814 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:16Z","lastTransitionTime":"2026-02-16T11:08:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.828929 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.828974 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.828985 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.829001 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.829012 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:16Z","lastTransitionTime":"2026-02-16T11:08:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.931055 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.931112 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.931132 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.931156 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:16 crc kubenswrapper[4949]: I0216 11:08:16.931216 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:16Z","lastTransitionTime":"2026-02-16T11:08:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.033725 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.033788 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.033801 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.033823 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.033840 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:17Z","lastTransitionTime":"2026-02-16T11:08:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.136411 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.136471 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.136480 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.136511 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.136526 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:17Z","lastTransitionTime":"2026-02-16T11:08:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.234709 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:08:17 crc kubenswrapper[4949]: E0216 11:08:17.234903 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6" Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.239363 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.239393 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.239404 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.239419 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.239432 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:17Z","lastTransitionTime":"2026-02-16T11:08:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.341664 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.341731 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.341743 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.341772 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.341794 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:17Z","lastTransitionTime":"2026-02-16T11:08:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.355198 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-15 00:20:17.730198415 +0000 UTC Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.445450 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.445605 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.445627 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.445654 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.445672 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:17Z","lastTransitionTime":"2026-02-16T11:08:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.548900 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.549156 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.549182 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.549202 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.549225 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:17Z","lastTransitionTime":"2026-02-16T11:08:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.652200 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.652241 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.652250 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.652262 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.652273 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:17Z","lastTransitionTime":"2026-02-16T11:08:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.755771 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.755833 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.755848 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.755901 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.755917 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:17Z","lastTransitionTime":"2026-02-16T11:08:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.858293 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.858347 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.858359 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.858374 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.858389 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:17Z","lastTransitionTime":"2026-02-16T11:08:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.961300 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.961359 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.961377 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.961399 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:17 crc kubenswrapper[4949]: I0216 11:08:17.961417 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:17Z","lastTransitionTime":"2026-02-16T11:08:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.065726 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.065789 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.065800 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.065822 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.065837 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:18Z","lastTransitionTime":"2026-02-16T11:08:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.169912 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.169973 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.169984 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.170008 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.170024 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:18Z","lastTransitionTime":"2026-02-16T11:08:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.234437 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.234437 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.234543 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:08:18 crc kubenswrapper[4949]: E0216 11:08:18.235040 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:08:18 crc kubenswrapper[4949]: E0216 11:08:18.235199 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:08:18 crc kubenswrapper[4949]: E0216 11:08:18.235059 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.279500 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.279547 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.279565 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.279591 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.279609 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:18Z","lastTransitionTime":"2026-02-16T11:08:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.356091 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-28 07:16:27.608905728 +0000 UTC Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.382532 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.382572 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.382588 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.382753 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.382810 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:18Z","lastTransitionTime":"2026-02-16T11:08:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.484870 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.484915 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.484929 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.484945 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.484957 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:18Z","lastTransitionTime":"2026-02-16T11:08:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.587328 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.587364 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.587374 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.587390 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.587401 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:18Z","lastTransitionTime":"2026-02-16T11:08:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.690199 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.690244 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.690255 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.690271 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.690282 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:18Z","lastTransitionTime":"2026-02-16T11:08:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.792819 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.792888 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.792911 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.792941 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.792964 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:18Z","lastTransitionTime":"2026-02-16T11:08:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.895919 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.895983 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.896021 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.896057 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.896081 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:18Z","lastTransitionTime":"2026-02-16T11:08:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.999400 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.999453 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.999468 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.999490 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:18 crc kubenswrapper[4949]: I0216 11:08:18.999509 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:18Z","lastTransitionTime":"2026-02-16T11:08:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.102665 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.102720 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.102730 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.102751 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.102763 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:19Z","lastTransitionTime":"2026-02-16T11:08:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.205799 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.205864 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.205882 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.205911 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.205930 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:19Z","lastTransitionTime":"2026-02-16T11:08:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.234649 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:08:19 crc kubenswrapper[4949]: E0216 11:08:19.234906 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.273436 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.273499 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.273516 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.273586 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.273603 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:19Z","lastTransitionTime":"2026-02-16T11:08:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:19 crc kubenswrapper[4949]: E0216 11:08:19.294215 4949 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:08:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:08:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:08:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:08:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc77a723-71f9-4f4a-b80e-2feb50c63f04\\\",\\\"systemUUID\\\":\\\"fcf7eef6-e236-4c8e-bd9c-41b70a7621ed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:19Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.298531 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.298581 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.298598 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.298623 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.298640 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:19Z","lastTransitionTime":"2026-02-16T11:08:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:19 crc kubenswrapper[4949]: E0216 11:08:19.318944 4949 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:08:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:08:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:08:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:08:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc77a723-71f9-4f4a-b80e-2feb50c63f04\\\",\\\"systemUUID\\\":\\\"fcf7eef6-e236-4c8e-bd9c-41b70a7621ed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:19Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.325025 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.325087 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.325106 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.325131 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.325151 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:19Z","lastTransitionTime":"2026-02-16T11:08:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:19 crc kubenswrapper[4949]: E0216 11:08:19.344630 4949 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:08:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:08:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:08:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:08:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc77a723-71f9-4f4a-b80e-2feb50c63f04\\\",\\\"systemUUID\\\":\\\"fcf7eef6-e236-4c8e-bd9c-41b70a7621ed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:19Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.349349 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.349404 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.349451 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.349474 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.349491 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:19Z","lastTransitionTime":"2026-02-16T11:08:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.357092 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-02 22:49:24.172894628 +0000 UTC Feb 16 11:08:19 crc kubenswrapper[4949]: E0216 11:08:19.366464 4949 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:08:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:08:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:08:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:08:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc77a723-71f9-4f4a-b80e-2feb50c63f04\\\",\\\"systemUUID\\\":\\\"fcf7eef6-e236-4c8e-bd9c-41b70a7621ed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:19Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.370859 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.370892 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.370905 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.370924 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.370935 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:19Z","lastTransitionTime":"2026-02-16T11:08:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:19 crc kubenswrapper[4949]: E0216 11:08:19.389414 4949 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:08:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:08:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:08:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:08:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc77a723-71f9-4f4a-b80e-2feb50c63f04\\\",\\\"systemUUID\\\":\\\"fcf7eef6-e236-4c8e-bd9c-41b70a7621ed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:19Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:19 crc kubenswrapper[4949]: E0216 11:08:19.389546 4949 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.391490 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.391518 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.391528 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.391541 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.391552 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:19Z","lastTransitionTime":"2026-02-16T11:08:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.493597 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.493694 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.493711 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.493735 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.493750 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:19Z","lastTransitionTime":"2026-02-16T11:08:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.596660 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.596721 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.596737 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.596759 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.596777 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:19Z","lastTransitionTime":"2026-02-16T11:08:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.699294 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.699358 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.699377 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.699401 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.699421 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:19Z","lastTransitionTime":"2026-02-16T11:08:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.802685 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.802749 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.802766 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.802789 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.802870 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:19Z","lastTransitionTime":"2026-02-16T11:08:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.905561 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.905622 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.905639 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.905662 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:19 crc kubenswrapper[4949]: I0216 11:08:19.905679 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:19Z","lastTransitionTime":"2026-02-16T11:08:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.008072 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.008132 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.008143 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.008159 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.008190 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:20Z","lastTransitionTime":"2026-02-16T11:08:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.110927 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.111210 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.111285 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.111410 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.111490 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:20Z","lastTransitionTime":"2026-02-16T11:08:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.214200 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.214259 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.214276 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.214300 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.214317 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:20Z","lastTransitionTime":"2026-02-16T11:08:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.234485 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.234603 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:08:20 crc kubenswrapper[4949]: E0216 11:08:20.234699 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.234615 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:08:20 crc kubenswrapper[4949]: E0216 11:08:20.234835 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:08:20 crc kubenswrapper[4949]: E0216 11:08:20.234964 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.316738 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.316794 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.316813 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.316835 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.316852 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:20Z","lastTransitionTime":"2026-02-16T11:08:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.357811 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-25 13:36:04.954613046 +0000 UTC Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.420740 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.420803 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.420820 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.420847 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.420864 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:20Z","lastTransitionTime":"2026-02-16T11:08:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.524266 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.524302 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.524310 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.524326 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.524335 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:20Z","lastTransitionTime":"2026-02-16T11:08:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.627395 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.627466 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.627482 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.627507 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.627523 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:20Z","lastTransitionTime":"2026-02-16T11:08:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.730997 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.731039 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.731048 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.731062 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.731074 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:20Z","lastTransitionTime":"2026-02-16T11:08:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.833275 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.833321 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.833331 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.833345 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.833356 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:20Z","lastTransitionTime":"2026-02-16T11:08:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.935292 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.935373 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.935386 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.935415 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:20 crc kubenswrapper[4949]: I0216 11:08:20.935439 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:20Z","lastTransitionTime":"2026-02-16T11:08:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.038427 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.038497 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.038514 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.038540 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.038558 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:21Z","lastTransitionTime":"2026-02-16T11:08:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.141149 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.141210 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.141220 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.141236 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.141250 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:21Z","lastTransitionTime":"2026-02-16T11:08:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.234871 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:08:21 crc kubenswrapper[4949]: E0216 11:08:21.235075 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.237307 4949 scope.go:117] "RemoveContainer" containerID="1bd5bfd89e4acde3a8bfaf0f75ea8b729c29cb7eb095d232605689ae81cca223" Feb 16 11:08:21 crc kubenswrapper[4949]: E0216 11:08:21.238116 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-gfr2q_openshift-ovn-kubernetes(3f545ae8-1b14-4abd-b4ea-844f6ae7b54d)\"" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.244564 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.244625 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.244647 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.244678 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.244696 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:21Z","lastTransitionTime":"2026-02-16T11:08:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.252507 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://317e85af8628e3ae9d104c6faacfd4ce6b66adc7d3f0e426433f60118fffa894\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://43a9246b1c43b2bfb063c1d8aded507f620472bb98ffc99e91115e1189478807\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.268435 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eff651eda112458195ec8e6bc6ecdf362f44a44f7f8be136f631153251f278c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.291668 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c16c3ec1c264807a71bba82352d13478aabcbae9eee409569b1de207fd89131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6b376f8bceb83918f978008daff9cea37a309102416bb94a209bf286a7fe3b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e903800152f2a61249dd315080aec7aff0fb0f96a2d687f4903ee211ac061080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b36100d71aa9c4c492c12006bd25f19aa6f8e8b39048b6e895acbadf3a3d5b5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6614345cd2b9b48d07439200086ab97d851abf34d3fea2b29a5073f224184ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c30142bc1aa2477b0821702607a40da4bd125f14ce8bdd4d6a555481f4be477\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd5bfd89e4acde3a8bfaf0f75ea8b729c29cb7e
b095d232605689ae81cca223\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bd5bfd89e4acde3a8bfaf0f75ea8b729c29cb7eb095d232605689ae81cca223\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-16T11:08:06Z\\\",\\\"message\\\":\\\"/kube-apiserver-crc\\\\nI0216 11:08:06.070353 7000 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI0216 11:08:06.070408 7000 obj_retry.go:365] Adding new object: *v1.Pod openshift-machine-config-operator/machine-config-daemon-26lss\\\\nI0216 11:08:06.070423 7000 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI0216 11:08:06.070445 7000 ovn.go:134] Ensuring zone local for Pod openshift-network-diagnostics/network-check-target-xd92c in node crc\\\\nI0216 11:08:06.070455 7000 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf\\\\nF0216 11:08:06.070472 7000 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:08:05Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-gfr2q_openshift-ovn-kubernetes(3f545ae8-1b14-4abd-b4ea-844f6ae7b54d)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47947db986c5433367c6b52570a017def2e4babf72f785111587ca86fd745795\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qzd7d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gfr2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.303072 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5b045625-b94b-4268-b1f3-4a735fc15ace\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://26a3c0d2164b3d6a615dce1de93b44a4a85b1831d656a20e5ce11909fad6d776\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cfced29d64a36faa7fb01921de2fa10276f96c8bda9ca0
28442ff93523a0e3e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfced29d64a36faa7fb01921de2fa10276f96c8bda9ca028442ff93523a0e3e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.315004 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5554e66b-3309-4de5-b983-47e39d83fd75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b0f44fbd5ac8ebc2187917178b0999e0eb7837a1c097691c9667669486f5a01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e29b88847d04a477cf98987a7df615c6de0a6d9749cf7bdac9569f10360fc5e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/
ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb35c1de6926d4a26d187408bff5edd0286e53989f9b76e8c589428b397f9f13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f2ffd2fb061548169d3abaf8402343cafbc3e5ae03a850efdbd2d58416044185\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f2ffd2fb061548169d3abaf8402343cafbc3e5ae03a850efdbd2d58416044185\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.327307 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"39ca5ab7-457c-4404-a3eb-f6acce74843b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d3e706e8ad75d6c7419dc41073b0842faf175f1e153735a12e3ee7243676253\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4978ec698681f01249736b54aa5a1fba37fc94f3667b735c99bdfb97d3bd3380\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpzms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-26lss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.337854 4949 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cs472" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75391841-4aa8-4f03-b696-23ac283642c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a92a94b385e42e72f4c48c4c2a8ddba220c077158d0f72e099e562d3b84bf70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bql55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3201fb16e33f256fad3eed7af0783c71ccb51ca75e6734c0745fada820853b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bql55\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cs472\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.346269 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.346325 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.346337 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.346357 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.346372 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:21Z","lastTransitionTime":"2026-02-16T11:08:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.348608 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6v4x7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"965b4f20-8786-4c47-8721-c348942551d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktpql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktpql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:27Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6v4x7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.358313 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-01 20:51:25.852869569 +0000 UTC Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.359867 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.370190 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.388692 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ff1c788-4b66-48e9-8178-006f231d264c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87041b3d12c1149f97ab42c0edec7c62aaee2e50309ad8c236b1842a6164bf79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a196649795d56a51c8ba77d0ce44d98f9157a5479dd2f46f827d12aae4c972ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20d26c14a91de559e7275af8a7f0c8a4718a64b9bd74f472619f108110af4243\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516785c5baca88a2150eaecfb9887ca33e825bbeb4ec27e82df10b49771ff5c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd58cdca3bd6ac3285810d4e6fbc19cf7fd7546285f5e59046bee39e48693414\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d83ba9996ba5c3ceb2bbcc2a9ad48391d40c0d0f4d9184a7da8308f4d16b9bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://04f2249ea74a94a13c4318ab61f1ebbe53b14af7f7d28de4d193fdbe07598edf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:07:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7ksw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vjrxd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.400666 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kvrsd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e1092c7-896f-4334-b157-ac007cdb0d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a5d6a2719dc30352250a07c1028c552b8f73c26d421b869f2e8ddc74fb9dcd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8v7hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:15Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kvrsd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 
11:08:21.416985 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"abcc0a5c-3dfa-48fe-9df1-4f941d9d811c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://afad26039cce493d107df9286cf3268dfc5f76d20b86bb34a36ef7742b8419bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://68519f3c9bfc45ad28f92e2cf0c28a9413821784aafd91ab65a311259ed6ecf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9f247fc563e96b2d5c1e866afe8eef00f61520018ad001e6b02cffac286d3ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"
containerID\\\":\\\"cri-o://2296e3c50624ffcfe10eb50ae71f715af1868bfa777c60068d3ef55a2544af00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.434979 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1997fe9cefd6bfcd634b77dad76d5d7c814318127f2be93b489d7866808c57ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal 
error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.449118 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.450461 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.450504 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.450518 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.450560 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:21 crc 
kubenswrapper[4949]: I0216 11:08:21.450576 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:21Z","lastTransitionTime":"2026-02-16T11:08:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.459656 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kxn9z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc3f701c-2094-4818-871c-547fc5636a55\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://993e593b3f31361b71cff339c39e1bc66b6fbbeb8a1d125976254ec852dcd919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rld7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kxn9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.475004 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jsmls" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e42a398-f83a-4463-9ab7-4e093e80d744\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:08:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e24db1cec1807881a31cfb6501695acefa892c247aa78d342eb43cb6a9ed32ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e3e0579c66283751490a28d6f93146aa4b7a000219c17747fe2041a778fe2ff\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-16T11:08:02Z\\\",\\\"message\\\":\\\"2026-02-16T11:07:17+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_7dce261c-32ce-4859-b0c0-e6188b149e57\\\\n2026-02-16T11:07:17+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_7dce261c-32ce-4859-b0c0-e6188b149e57 to /host/opt/cni/bin/\\\\n2026-02-16T11:07:17Z [verbose] multus-daemon started\\\\n2026-02-16T11:07:17Z [verbose] Readiness Indicator file check\\\\n2026-02-16T11:08:02Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:07:14Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:08:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7q7gk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:07:13Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jsmls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.490860 4949 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aada2690-8d5f-4854-bc83-59906010e8ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:07:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-16T11:06:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4e6265ce5aefd48ee356fb002a61e38aed11e42968db8945ffeaba1d7ee3c9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-16T11:07:11Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0216 11:07:05.819935 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0216 11:07:05.820709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2063297113/tls.crt::/tmp/serving-cert-2063297113/tls.key\\\\\\\"\\\\nI0216 11:07:11.242838 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0216 11:07:11.246739 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0216 11:07:11.246759 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0216 11:07:11.246780 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0216 11:07:11.246785 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0216 11:07:11.257100 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0216 11:07:11.257140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0216 11:07:11.257148 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0216 11:07:11.257148 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0216 11:07:11.257158 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0216 11:07:11.257195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0216 11:07:11.257203 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0216 11:07:11.257208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0216 11:07:11.259242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:07:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-16T11:06:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-16T11:06:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-16T11:06:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-16T11:06:51Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-16T11:08:21Z is after 2025-08-24T17:21:41Z" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.553492 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.553801 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.553897 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.553984 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.554072 4949 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:21Z","lastTransitionTime":"2026-02-16T11:08:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.656950 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.657031 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.657047 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.657069 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.657084 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:21Z","lastTransitionTime":"2026-02-16T11:08:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.759944 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.760001 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.760011 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.760028 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.760038 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:21Z","lastTransitionTime":"2026-02-16T11:08:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.862526 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.862565 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.862576 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.862592 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.862604 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:21Z","lastTransitionTime":"2026-02-16T11:08:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.964692 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.964742 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.964753 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.964767 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:21 crc kubenswrapper[4949]: I0216 11:08:21.964775 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:21Z","lastTransitionTime":"2026-02-16T11:08:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.066848 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.066903 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.067110 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.067128 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.067142 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:22Z","lastTransitionTime":"2026-02-16T11:08:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.169815 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.169933 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.169944 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.169958 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.169967 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:22Z","lastTransitionTime":"2026-02-16T11:08:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.234732 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.234829 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.234889 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:08:22 crc kubenswrapper[4949]: E0216 11:08:22.235029 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:08:22 crc kubenswrapper[4949]: E0216 11:08:22.235133 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:08:22 crc kubenswrapper[4949]: E0216 11:08:22.235497 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.248674 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.271516 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.271560 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.271569 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.271582 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.271590 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:22Z","lastTransitionTime":"2026-02-16T11:08:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.358427 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-03 09:47:30.321174151 +0000 UTC Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.373468 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.373509 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.373517 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.373530 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.373540 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:22Z","lastTransitionTime":"2026-02-16T11:08:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.478213 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.478277 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.478303 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.478336 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.478355 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:22Z","lastTransitionTime":"2026-02-16T11:08:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.581090 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.581230 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.581257 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.581286 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.581304 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:22Z","lastTransitionTime":"2026-02-16T11:08:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.684774 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.684827 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.684846 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.684870 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.684890 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:22Z","lastTransitionTime":"2026-02-16T11:08:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.788087 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.788147 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.788163 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.788208 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.788224 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:22Z","lastTransitionTime":"2026-02-16T11:08:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.891335 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.891394 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.891418 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.891448 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.891469 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:22Z","lastTransitionTime":"2026-02-16T11:08:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.994494 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.994566 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.994590 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.994620 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:22 crc kubenswrapper[4949]: I0216 11:08:22.994646 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:22Z","lastTransitionTime":"2026-02-16T11:08:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:23 crc kubenswrapper[4949]: I0216 11:08:23.097243 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:23 crc kubenswrapper[4949]: I0216 11:08:23.097286 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:23 crc kubenswrapper[4949]: I0216 11:08:23.097301 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:23 crc kubenswrapper[4949]: I0216 11:08:23.097321 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:23 crc kubenswrapper[4949]: I0216 11:08:23.097335 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:23Z","lastTransitionTime":"2026-02-16T11:08:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:23 crc kubenswrapper[4949]: I0216 11:08:23.200362 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:23 crc kubenswrapper[4949]: I0216 11:08:23.200414 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:23 crc kubenswrapper[4949]: I0216 11:08:23.200456 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:23 crc kubenswrapper[4949]: I0216 11:08:23.200474 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:23 crc kubenswrapper[4949]: I0216 11:08:23.200486 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:23Z","lastTransitionTime":"2026-02-16T11:08:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:23 crc kubenswrapper[4949]: I0216 11:08:23.234941 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:08:23 crc kubenswrapper[4949]: E0216 11:08:23.235069 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6" Feb 16 11:08:23 crc kubenswrapper[4949]: I0216 11:08:23.302910 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:23 crc kubenswrapper[4949]: I0216 11:08:23.302948 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:23 crc kubenswrapper[4949]: I0216 11:08:23.302958 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:23 crc kubenswrapper[4949]: I0216 11:08:23.302971 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:23 crc kubenswrapper[4949]: I0216 11:08:23.302979 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:23Z","lastTransitionTime":"2026-02-16T11:08:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:23 crc kubenswrapper[4949]: I0216 11:08:23.359509 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-17 11:41:21.095724063 +0000 UTC Feb 16 11:08:23 crc kubenswrapper[4949]: I0216 11:08:23.405833 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:23 crc kubenswrapper[4949]: I0216 11:08:23.405875 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:23 crc kubenswrapper[4949]: I0216 11:08:23.405883 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:23 crc kubenswrapper[4949]: I0216 11:08:23.405897 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:23 crc kubenswrapper[4949]: I0216 11:08:23.405906 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:23Z","lastTransitionTime":"2026-02-16T11:08:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:23 crc kubenswrapper[4949]: I0216 11:08:23.508616 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:23 crc kubenswrapper[4949]: I0216 11:08:23.508667 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:23 crc kubenswrapper[4949]: I0216 11:08:23.508685 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:23 crc kubenswrapper[4949]: I0216 11:08:23.508707 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:23 crc kubenswrapper[4949]: I0216 11:08:23.508726 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:23Z","lastTransitionTime":"2026-02-16T11:08:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:23 crc kubenswrapper[4949]: I0216 11:08:23.611350 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:23 crc kubenswrapper[4949]: I0216 11:08:23.611409 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:23 crc kubenswrapper[4949]: I0216 11:08:23.611432 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:23 crc kubenswrapper[4949]: I0216 11:08:23.611461 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:23 crc kubenswrapper[4949]: I0216 11:08:23.611483 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:23Z","lastTransitionTime":"2026-02-16T11:08:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:23 crc kubenswrapper[4949]: I0216 11:08:23.715354 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:23 crc kubenswrapper[4949]: I0216 11:08:23.715412 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:23 crc kubenswrapper[4949]: I0216 11:08:23.715429 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:23 crc kubenswrapper[4949]: I0216 11:08:23.715449 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:23 crc kubenswrapper[4949]: I0216 11:08:23.715463 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:23Z","lastTransitionTime":"2026-02-16T11:08:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:23 crc kubenswrapper[4949]: I0216 11:08:23.818190 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:23 crc kubenswrapper[4949]: I0216 11:08:23.818224 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:23 crc kubenswrapper[4949]: I0216 11:08:23.818232 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:23 crc kubenswrapper[4949]: I0216 11:08:23.818244 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:23 crc kubenswrapper[4949]: I0216 11:08:23.818253 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:23Z","lastTransitionTime":"2026-02-16T11:08:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:23 crc kubenswrapper[4949]: I0216 11:08:23.922520 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:23 crc kubenswrapper[4949]: I0216 11:08:23.922569 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:23 crc kubenswrapper[4949]: I0216 11:08:23.922580 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:23 crc kubenswrapper[4949]: I0216 11:08:23.922596 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:23 crc kubenswrapper[4949]: I0216 11:08:23.922606 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:23Z","lastTransitionTime":"2026-02-16T11:08:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.025869 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.025964 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.025982 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.026012 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.026032 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:24Z","lastTransitionTime":"2026-02-16T11:08:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.129741 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.129822 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.129845 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.129876 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.129896 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:24Z","lastTransitionTime":"2026-02-16T11:08:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.233264 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.233320 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.233333 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.233348 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.233359 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:24Z","lastTransitionTime":"2026-02-16T11:08:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.234449 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.234608 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.234651 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:08:24 crc kubenswrapper[4949]: E0216 11:08:24.234803 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:08:24 crc kubenswrapper[4949]: E0216 11:08:24.234856 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:08:24 crc kubenswrapper[4949]: E0216 11:08:24.234925 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.337028 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.337083 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.337094 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.337115 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.337133 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:24Z","lastTransitionTime":"2026-02-16T11:08:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.360528 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-10 09:11:38.346435137 +0000 UTC Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.440654 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.440708 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.440719 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.440745 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.440765 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:24Z","lastTransitionTime":"2026-02-16T11:08:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.543473 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.543520 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.543532 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.543552 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.543564 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:24Z","lastTransitionTime":"2026-02-16T11:08:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.647102 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.647213 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.647243 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.647278 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.647304 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:24Z","lastTransitionTime":"2026-02-16T11:08:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.751038 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.751089 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.751106 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.751132 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.751152 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:24Z","lastTransitionTime":"2026-02-16T11:08:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.854825 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.854925 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.854940 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.854965 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.854979 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:24Z","lastTransitionTime":"2026-02-16T11:08:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.957726 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.957788 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.957810 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.957839 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:24 crc kubenswrapper[4949]: I0216 11:08:24.957859 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:24Z","lastTransitionTime":"2026-02-16T11:08:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.061443 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.061486 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.061503 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.061525 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.061541 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:25Z","lastTransitionTime":"2026-02-16T11:08:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.164951 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.165000 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.165017 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.165039 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.165056 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:25Z","lastTransitionTime":"2026-02-16T11:08:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.234311 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:08:25 crc kubenswrapper[4949]: E0216 11:08:25.234549 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6" Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.268442 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.268522 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.268546 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.268573 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.268594 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:25Z","lastTransitionTime":"2026-02-16T11:08:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.360698 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-10 05:40:14.243745648 +0000 UTC Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.371211 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.371266 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.371285 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.371307 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.371326 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:25Z","lastTransitionTime":"2026-02-16T11:08:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.474333 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.474405 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.474441 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.474474 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.474497 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:25Z","lastTransitionTime":"2026-02-16T11:08:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.577949 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.578014 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.578032 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.578059 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.578078 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:25Z","lastTransitionTime":"2026-02-16T11:08:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.681703 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.681772 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.681788 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.681814 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.681830 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:25Z","lastTransitionTime":"2026-02-16T11:08:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.784847 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.784912 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.784922 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.784943 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.784956 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:25Z","lastTransitionTime":"2026-02-16T11:08:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.887743 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.887814 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.887825 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.887849 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.887864 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:25Z","lastTransitionTime":"2026-02-16T11:08:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.991099 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.991206 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.991226 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.991261 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:25 crc kubenswrapper[4949]: I0216 11:08:25.991302 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:25Z","lastTransitionTime":"2026-02-16T11:08:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 16 11:08:26 crc kubenswrapper[4949]: I0216 11:08:26.094330 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:26 crc kubenswrapper[4949]: I0216 11:08:26.094397 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:26 crc kubenswrapper[4949]: I0216 11:08:26.094411 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:26 crc kubenswrapper[4949]: I0216 11:08:26.094440 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:26 crc kubenswrapper[4949]: I0216 11:08:26.094455 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:26Z","lastTransitionTime":"2026-02-16T11:08:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:26 crc kubenswrapper[4949]: I0216 11:08:26.198604 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:26 crc kubenswrapper[4949]: I0216 11:08:26.198659 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:26 crc kubenswrapper[4949]: I0216 11:08:26.198670 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:26 crc kubenswrapper[4949]: I0216 11:08:26.198689 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:26 crc kubenswrapper[4949]: I0216 11:08:26.198698 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:26Z","lastTransitionTime":"2026-02-16T11:08:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 16 11:08:26 crc kubenswrapper[4949]: I0216 11:08:26.235654 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:08:26 crc kubenswrapper[4949]: I0216 11:08:26.235752 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:08:26 crc kubenswrapper[4949]: E0216 11:08:26.236059 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:08:26 crc kubenswrapper[4949]: E0216 11:08:26.236227 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:08:26 crc kubenswrapper[4949]: I0216 11:08:26.235842 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:08:26 crc kubenswrapper[4949]: E0216 11:08:26.236684 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:08:26 crc kubenswrapper[4949]: I0216 11:08:26.301253 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 16 11:08:26 crc kubenswrapper[4949]: I0216 11:08:26.301311 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 16 11:08:26 crc kubenswrapper[4949]: I0216 11:08:26.301321 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 16 11:08:26 crc kubenswrapper[4949]: I0216 11:08:26.301345 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 16 11:08:26 crc kubenswrapper[4949]: I0216 11:08:26.301358 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:26Z","lastTransitionTime":"2026-02-16T11:08:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Feb 16 11:08:26 crc kubenswrapper[4949]: I0216 11:08:26.361071 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-09 12:12:56.320696461 +0000 UTC
Feb 16 11:08:26 crc kubenswrapper[4949]: I0216 11:08:26.405154 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Feb 16 11:08:26 crc kubenswrapper[4949]: I0216 11:08:26.405225 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Feb 16 11:08:26 crc kubenswrapper[4949]: I0216 11:08:26.405242 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Feb 16 11:08:26 crc kubenswrapper[4949]: I0216 11:08:26.405265 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Feb 16 11:08:26 crc kubenswrapper[4949]: I0216 11:08:26.405285 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:26Z","lastTransitionTime":"2026-02-16T11:08:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Feb 16 11:08:26 crc kubenswrapper[4949]: I0216 11:08:26.508103 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Feb 16 11:08:26 crc kubenswrapper[4949]: I0216 11:08:26.508206 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Feb 16 11:08:26 crc kubenswrapper[4949]: I0216 11:08:26.508225 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Feb 16 11:08:26 crc kubenswrapper[4949]: I0216 11:08:26.508253 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Feb 16 11:08:26 crc kubenswrapper[4949]: I0216 11:08:26.508274 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:26Z","lastTransitionTime":"2026-02-16T11:08:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Feb 16 11:08:26 crc kubenswrapper[4949]: I0216 11:08:26.610784 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Feb 16 11:08:26 crc kubenswrapper[4949]: I0216 11:08:26.610873 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Feb 16 11:08:26 crc kubenswrapper[4949]: I0216 11:08:26.610896 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Feb 16 11:08:26 crc kubenswrapper[4949]: I0216 11:08:26.610930 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Feb 16 11:08:26 crc kubenswrapper[4949]: I0216 11:08:26.610954 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:26Z","lastTransitionTime":"2026-02-16T11:08:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Feb 16 11:08:26 crc kubenswrapper[4949]: I0216 11:08:26.714985 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Feb 16 11:08:26 crc kubenswrapper[4949]: I0216 11:08:26.715048 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Feb 16 11:08:26 crc kubenswrapper[4949]: I0216 11:08:26.715061 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Feb 16 11:08:26 crc kubenswrapper[4949]: I0216 11:08:26.715076 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Feb 16 11:08:26 crc kubenswrapper[4949]: I0216 11:08:26.715362 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:26Z","lastTransitionTime":"2026-02-16T11:08:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Feb 16 11:08:26 crc kubenswrapper[4949]: I0216 11:08:26.817971 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Feb 16 11:08:26 crc kubenswrapper[4949]: I0216 11:08:26.818004 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Feb 16 11:08:26 crc kubenswrapper[4949]: I0216 11:08:26.818012 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Feb 16 11:08:26 crc kubenswrapper[4949]: I0216 11:08:26.818025 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Feb 16 11:08:26 crc kubenswrapper[4949]: I0216 11:08:26.818035 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:26Z","lastTransitionTime":"2026-02-16T11:08:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Feb 16 11:08:26 crc kubenswrapper[4949]: I0216 11:08:26.920378 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Feb 16 11:08:26 crc kubenswrapper[4949]: I0216 11:08:26.920666 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Feb 16 11:08:26 crc kubenswrapper[4949]: I0216 11:08:26.920676 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Feb 16 11:08:26 crc kubenswrapper[4949]: I0216 11:08:26.920694 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Feb 16 11:08:26 crc kubenswrapper[4949]: I0216 11:08:26.920706 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:26Z","lastTransitionTime":"2026-02-16T11:08:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.023648 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.023683 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.023692 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.023711 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.023722 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:27Z","lastTransitionTime":"2026-02-16T11:08:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.126531 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.126603 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.126619 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.126640 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.126662 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:27Z","lastTransitionTime":"2026-02-16T11:08:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.229153 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.229231 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.229243 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.229260 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.229270 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:27Z","lastTransitionTime":"2026-02-16T11:08:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.234909 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7"
Feb 16 11:08:27 crc kubenswrapper[4949]: E0216 11:08:27.235135 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6"
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.331892 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.331938 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.331948 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.331995 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.332009 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:27Z","lastTransitionTime":"2026-02-16T11:08:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
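[Editor's note] The recurring "no CNI configuration file in /etc/kubernetes/cni/net.d/" message is the runtime's network-readiness probe finding no usable network config until the OVN pods write one. A rough standalone approximation of such a check; this is not the actual CRI-O/ocicni code, and the extension list is an assumption based on common CNI conventions:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// hasCNIConfig reports whether dir contains at least one CNI network config.
func hasCNIConfig(dir string) (bool, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return false, err
	}
	for _, e := range entries {
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			return true, nil
		}
	}
	return false, nil
}

func main() {
	ok, err := hasCNIConfig("/etc/kubernetes/cni/net.d")
	if err != nil || !ok {
		// This is the state the kubelet keeps reporting above:
		// NetworkReady=false until the network provider drops a config file.
		fmt.Println("network plugin not ready: no CNI configuration file")
		return
	}
	fmt.Println("CNI config present; NetworkReady can flip to true")
}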
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.361367 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-06 00:10:52.101749612 +0000 UTC
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.435416 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.435474 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.435488 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.435511 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.435525 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:27Z","lastTransitionTime":"2026-02-16T11:08:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.539413 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.539469 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.539487 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.539550 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.539562 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:27Z","lastTransitionTime":"2026-02-16T11:08:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.642571 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.642632 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.642651 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.642677 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.642693 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:27Z","lastTransitionTime":"2026-02-16T11:08:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.745817 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.745902 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.745921 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.745946 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.745993 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:27Z","lastTransitionTime":"2026-02-16T11:08:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.848664 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.848960 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.848974 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.848990 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.849000 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:27Z","lastTransitionTime":"2026-02-16T11:08:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.951819 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.952110 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.952190 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.952265 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Feb 16 11:08:27 crc kubenswrapper[4949]: I0216 11:08:27.952324 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:27Z","lastTransitionTime":"2026-02-16T11:08:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Feb 16 11:08:28 crc kubenswrapper[4949]: I0216 11:08:28.055237 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Feb 16 11:08:28 crc kubenswrapper[4949]: I0216 11:08:28.055610 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Feb 16 11:08:28 crc kubenswrapper[4949]: I0216 11:08:28.055750 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Feb 16 11:08:28 crc kubenswrapper[4949]: I0216 11:08:28.055894 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Feb 16 11:08:28 crc kubenswrapper[4949]: I0216 11:08:28.056132 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:28Z","lastTransitionTime":"2026-02-16T11:08:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Feb 16 11:08:28 crc kubenswrapper[4949]: I0216 11:08:28.159877 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Feb 16 11:08:28 crc kubenswrapper[4949]: I0216 11:08:28.159934 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Feb 16 11:08:28 crc kubenswrapper[4949]: I0216 11:08:28.159955 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Feb 16 11:08:28 crc kubenswrapper[4949]: I0216 11:08:28.159982 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Feb 16 11:08:28 crc kubenswrapper[4949]: I0216 11:08:28.160005 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:28Z","lastTransitionTime":"2026-02-16T11:08:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Feb 16 11:08:28 crc kubenswrapper[4949]: I0216 11:08:28.234539 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Feb 16 11:08:28 crc kubenswrapper[4949]: E0216 11:08:28.234649 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Feb 16 11:08:28 crc kubenswrapper[4949]: I0216 11:08:28.234551 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Feb 16 11:08:28 crc kubenswrapper[4949]: I0216 11:08:28.234540 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Feb 16 11:08:28 crc kubenswrapper[4949]: E0216 11:08:28.234714 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Feb 16 11:08:28 crc kubenswrapper[4949]: E0216 11:08:28.234926 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Feb 16 11:08:28 crc kubenswrapper[4949]: I0216 11:08:28.262335 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Feb 16 11:08:28 crc kubenswrapper[4949]: I0216 11:08:28.262377 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Feb 16 11:08:28 crc kubenswrapper[4949]: I0216 11:08:28.262389 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Feb 16 11:08:28 crc kubenswrapper[4949]: I0216 11:08:28.262406 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Feb 16 11:08:28 crc kubenswrapper[4949]: I0216 11:08:28.262419 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:28Z","lastTransitionTime":"2026-02-16T11:08:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.044708 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-22 07:55:45.972054661 +0000 UTC
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.047986 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.048012 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.048020 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.048033 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.048043 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:29Z","lastTransitionTime":"2026-02-16T11:08:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.150316 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.150372 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.150387 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.150408 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.150422 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:29Z","lastTransitionTime":"2026-02-16T11:08:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.235347 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7"
Feb 16 11:08:29 crc kubenswrapper[4949]: E0216 11:08:29.235825 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6"
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.253290 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.253326 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.253335 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.253351 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.253360 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:29Z","lastTransitionTime":"2026-02-16T11:08:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.356581 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.356632 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.356643 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.356666 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.356680 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:29Z","lastTransitionTime":"2026-02-16T11:08:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.446061 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.446122 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.446140 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.446164 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.446215 4949 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-16T11:08:29Z","lastTransitionTime":"2026-02-16T11:08:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.502954 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-sgs7l"]
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.503757 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-sgs7l"
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.506232 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4"
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.508861 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert"
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.509741 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt"
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.509954 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt"
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.540068 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-vjrxd" podStartSLOduration=77.540041511 podStartE2EDuration="1m17.540041511s" podCreationTimestamp="2026-02-16 11:07:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:08:29.527678835 +0000 UTC m=+99.157013010" watchObservedRunningTime="2026-02-16 11:08:29.540041511 +0000 UTC m=+99.169375696"
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.541072 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-kvrsd" podStartSLOduration=77.540966707 podStartE2EDuration="1m17.540966707s" podCreationTimestamp="2026-02-16 11:07:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:08:29.53965879 +0000 UTC m=+99.168992965" watchObservedRunningTime="2026-02-16 11:08:29.540966707 +0000 UTC m=+99.170300892"
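[Editor's note] The pod_startup_latency_tracker.go:104 entries report two figures: podStartE2EDuration is observedRunningTime minus podCreationTimestamp, and podStartSLOduration additionally excludes image-pull time, which here is zero because firstStartedPulling/lastFinishedPulling are the zero time. A small sketch of that arithmetic; field names are borrowed from the log, the function itself is illustrative:

package main

import (
	"fmt"
	"time"
)

// startupDurations reproduces the arithmetic behind the latency entries:
// E2E is running-minus-created; the SLO figure subtracts image-pull time.
func startupDurations(created, running, pullStart, pullEnd time.Time) (slo, e2e time.Duration) {
	e2e = running.Sub(created)
	slo = e2e
	if !pullStart.IsZero() && !pullEnd.IsZero() {
		slo -= pullEnd.Sub(pullStart)
	}
	return slo, e2e
}

func main() {
	created := time.Date(2026, 2, 16, 11, 7, 12, 0, time.UTC)
	running := created.Add(77*time.Second + 540041511*time.Nanosecond)
	// Zero pull times, as in the log ("0001-01-01 00:00:00 +0000 UTC"),
	// so SLO and E2E agree at ~1m17.54s, matching podStartSLOduration=77.540041511.
	slo, e2e := startupDurations(created, running, time.Time{}, time.Time{})
	fmt.Println(slo, e2e)
}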
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.551367 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/716c9526-6989-4da0-86fb-f97ec2024b34-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-sgs7l\" (UID: \"716c9526-6989-4da0-86fb-f97ec2024b34\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-sgs7l"
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.551467 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/716c9526-6989-4da0-86fb-f97ec2024b34-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-sgs7l\" (UID: \"716c9526-6989-4da0-86fb-f97ec2024b34\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-sgs7l"
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.551533 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/716c9526-6989-4da0-86fb-f97ec2024b34-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-sgs7l\" (UID: \"716c9526-6989-4da0-86fb-f97ec2024b34\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-sgs7l"
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.551636 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/716c9526-6989-4da0-86fb-f97ec2024b34-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-sgs7l\" (UID: \"716c9526-6989-4da0-86fb-f97ec2024b34\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-sgs7l"
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.551685 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/716c9526-6989-4da0-86fb-f97ec2024b34-service-ca\") pod \"cluster-version-operator-5c965bbfc6-sgs7l\" (UID: \"716c9526-6989-4da0-86fb-f97ec2024b34\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-sgs7l"
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.556743 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-kxn9z" podStartSLOduration=77.556718471 podStartE2EDuration="1m17.556718471s" podCreationTimestamp="2026-02-16 11:07:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:08:29.55634002 +0000 UTC m=+99.185674225" watchObservedRunningTime="2026-02-16 11:08:29.556718471 +0000 UTC m=+99.186052676"
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.577440 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-jsmls" podStartSLOduration=77.577423667 podStartE2EDuration="1m17.577423667s" podCreationTimestamp="2026-02-16 11:07:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:08:29.576424169 +0000 UTC m=+99.205758354" watchObservedRunningTime="2026-02-16 11:08:29.577423667 +0000 UTC m=+99.206757862"
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.633202 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=7.633143852 podStartE2EDuration="7.633143852s" podCreationTimestamp="2026-02-16 11:08:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:08:29.63098004 +0000 UTC m=+99.260314275" watchObservedRunningTime="2026-02-16 11:08:29.633143852 +0000 UTC m=+99.262478057"
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.649603 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=78.649578436 podStartE2EDuration="1m18.649578436s" podCreationTimestamp="2026-02-16 11:07:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:08:29.648361111 +0000 UTC m=+99.277695356" watchObservedRunningTime="2026-02-16 11:08:29.649578436 +0000 UTC m=+99.278912611"
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.652514 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/716c9526-6989-4da0-86fb-f97ec2024b34-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-sgs7l\" (UID: \"716c9526-6989-4da0-86fb-f97ec2024b34\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-sgs7l"
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.652552 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/716c9526-6989-4da0-86fb-f97ec2024b34-service-ca\") pod \"cluster-version-operator-5c965bbfc6-sgs7l\" (UID: \"716c9526-6989-4da0-86fb-f97ec2024b34\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-sgs7l"
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.652600 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/716c9526-6989-4da0-86fb-f97ec2024b34-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-sgs7l\" (UID: \"716c9526-6989-4da0-86fb-f97ec2024b34\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-sgs7l"
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.652642 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/716c9526-6989-4da0-86fb-f97ec2024b34-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-sgs7l\" (UID: \"716c9526-6989-4da0-86fb-f97ec2024b34\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-sgs7l"
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.652664 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/716c9526-6989-4da0-86fb-f97ec2024b34-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-sgs7l\" (UID: \"716c9526-6989-4da0-86fb-f97ec2024b34\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-sgs7l"
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.652725 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/716c9526-6989-4da0-86fb-f97ec2024b34-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-sgs7l\" (UID: \"716c9526-6989-4da0-86fb-f97ec2024b34\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-sgs7l"
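[Editor's note] The reconciler_common.go entries trace the kubelet volume manager's two-phase flow for the new cluster-version-operator pod: each volume is first registered via VerifyControllerAttachedVolume, then MountVolume runs and operation_generator.go logs SetUp succeeded per volume. A compressed sketch of that ordering; the types and the flat loop are illustrative, since the real kubelet drives this from desired-state/actual-state caches:

package main

import "fmt"

// Volume names taken from the cluster-version-operator entries above.
var volumes = []string{
	"etc-ssl-certs", "kube-api-access", "etc-cvo-updatepayloads",
	"serving-cert", "service-ca",
}

// reconcile mimics the logged two-phase ordering: verify every volume as
// attached, then mount each one and report success or failure.
func reconcile(pod string, mount func(vol string) error) error {
	for _, v := range volumes {
		fmt.Printf("VerifyControllerAttachedVolume started for %q pod %q\n", v, pod)
	}
	for _, v := range volumes {
		if err := mount(v); err != nil {
			return fmt.Errorf("MountVolume.SetUp failed for %q: %w", v, err)
		}
		fmt.Printf("MountVolume.SetUp succeeded for %q\n", v)
	}
	return nil
}

func main() {
	_ = reconcile("cluster-version-operator-5c965bbfc6-sgs7l",
		func(string) error { return nil }) // host-path/secret/configmap setup stubbed out
}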
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.652819 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/716c9526-6989-4da0-86fb-f97ec2024b34-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-sgs7l\" (UID: \"716c9526-6989-4da0-86fb-f97ec2024b34\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-sgs7l"
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.654099 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/716c9526-6989-4da0-86fb-f97ec2024b34-service-ca\") pod \"cluster-version-operator-5c965bbfc6-sgs7l\" (UID: \"716c9526-6989-4da0-86fb-f97ec2024b34\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-sgs7l"
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.663941 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/716c9526-6989-4da0-86fb-f97ec2024b34-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-sgs7l\" (UID: \"716c9526-6989-4da0-86fb-f97ec2024b34\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-sgs7l"
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.675433 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/716c9526-6989-4da0-86fb-f97ec2024b34-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-sgs7l\" (UID: \"716c9526-6989-4da0-86fb-f97ec2024b34\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-sgs7l"
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.688456 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=73.688431075 podStartE2EDuration="1m13.688431075s" podCreationTimestamp="2026-02-16 11:07:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:08:29.670279192 +0000 UTC m=+99.299613387" watchObservedRunningTime="2026-02-16 11:08:29.688431075 +0000 UTC m=+99.317765270"
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.713630 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=28.71361048 podStartE2EDuration="28.71361048s" podCreationTimestamp="2026-02-16 11:08:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:08:29.713320122 +0000 UTC m=+99.342654317" watchObservedRunningTime="2026-02-16 11:08:29.71361048 +0000 UTC m=+99.342944655"
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.726365 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=47.726348567 podStartE2EDuration="47.726348567s" podCreationTimestamp="2026-02-16 11:07:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:08:29.726037348 +0000 UTC m=+99.355371523" watchObservedRunningTime="2026-02-16 11:08:29.726348567 +0000 UTC m=+99.355682732"
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.822138 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-sgs7l"
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.829282 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podStartSLOduration=77.829262981 podStartE2EDuration="1m17.829262981s" podCreationTimestamp="2026-02-16 11:07:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:08:29.828973043 +0000 UTC m=+99.458307208" watchObservedRunningTime="2026-02-16 11:08:29.829262981 +0000 UTC m=+99.458597146"
Feb 16 11:08:29 crc kubenswrapper[4949]: I0216 11:08:29.849742 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cs472" podStartSLOduration=76.8497216 podStartE2EDuration="1m16.8497216s" podCreationTimestamp="2026-02-16 11:07:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:08:29.849661448 +0000 UTC m=+99.478995613" watchObservedRunningTime="2026-02-16 11:08:29.8497216 +0000 UTC m=+99.479055765"
Feb 16 11:08:30 crc kubenswrapper[4949]: I0216 11:08:30.045410 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-27 21:42:38.240931354 +0000 UTC
Feb 16 11:08:30 crc kubenswrapper[4949]: I0216 11:08:30.045513 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Rotating certificates
Feb 16 11:08:30 crc kubenswrapper[4949]: I0216 11:08:30.055726 4949 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146
Feb 16 11:08:30 crc kubenswrapper[4949]: I0216 11:08:30.131882 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-sgs7l" event={"ID":"716c9526-6989-4da0-86fb-f97ec2024b34","Type":"ContainerStarted","Data":"27953de7ad40388f007cdf4ec843534efe3126352df9da0c4672e967cb0b29cf"}
Feb 16 11:08:30 crc kubenswrapper[4949]: I0216 11:08:30.131953 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-sgs7l" event={"ID":"716c9526-6989-4da0-86fb-f97ec2024b34","Type":"ContainerStarted","Data":"aae1fe9eff08b7a8984e22dd59cb7b4b02b38d4b82c9459fe157f1dd37abc3cb"}
Feb 16 11:08:30 crc kubenswrapper[4949]: I0216 11:08:30.147144 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-sgs7l" podStartSLOduration=78.147113685 podStartE2EDuration="1m18.147113685s" podCreationTimestamp="2026-02-16 11:07:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:08:30.147014252 +0000 UTC m=+99.776348427" watchObservedRunningTime="2026-02-16 11:08:30.147113685 +0000 UTC m=+99.776447850"
Feb 16 11:08:30 crc kubenswrapper[4949]: I0216 11:08:30.234947 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Feb 16 11:08:30 crc kubenswrapper[4949]: I0216 11:08:30.235045 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
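[Editor's note] The kubelet.go:2453 "SyncLoop (PLEG)" entries are the Pod Lifecycle Event Generator surfacing ContainerStarted events for the two container IDs belonging to the new cluster-version-operator pod into the sync loop. A minimal sketch of the event shape as it appears in the log; the struct fields follow the logged JSON and the type itself is illustrative:

package main

import "fmt"

// podLifecycleEvent mirrors the event={"ID":...,"Type":"ContainerStarted","Data":...}
// payload in the SyncLoop (PLEG) entries above.
type podLifecycleEvent struct {
	ID   string // pod UID
	Type string // e.g. ContainerStarted
	Data string // container (or sandbox) ID
}

func main() {
	events := []podLifecycleEvent{
		{ID: "716c9526-6989-4da0-86fb-f97ec2024b34", Type: "ContainerStarted",
			Data: "27953de7ad40388f007cdf4ec843534efe3126352df9da0c4672e967cb0b29cf"},
		{ID: "716c9526-6989-4da0-86fb-f97ec2024b34", Type: "ContainerStarted",
			Data: "aae1fe9eff08b7a8984e22dd59cb7b4b02b38d4b82c9459fe157f1dd37abc3cb"},
	}
	for _, e := range events {
		// Each event triggers a pod sync, which is how the startup-latency
		// tracker observes the pod running moments later.
		fmt.Printf("SyncLoop (PLEG): pod %s %s %s\n", e.ID, e.Type, e.Data)
	}
}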
Feb 16 11:08:30 crc kubenswrapper[4949]: I0216 11:08:30.235066 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Feb 16 11:08:30 crc kubenswrapper[4949]: E0216 11:08:30.235228 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Feb 16 11:08:30 crc kubenswrapper[4949]: E0216 11:08:30.235557 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Feb 16 11:08:30 crc kubenswrapper[4949]: E0216 11:08:30.235689 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Feb 16 11:08:31 crc kubenswrapper[4949]: I0216 11:08:31.168494 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/965b4f20-8786-4c47-8721-c348942551d6-metrics-certs\") pod \"network-metrics-daemon-6v4x7\" (UID: \"965b4f20-8786-4c47-8721-c348942551d6\") " pod="openshift-multus/network-metrics-daemon-6v4x7"
Feb 16 11:08:31 crc kubenswrapper[4949]: E0216 11:08:31.168639 4949 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Feb 16 11:08:31 crc kubenswrapper[4949]: E0216 11:08:31.168702 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/965b4f20-8786-4c47-8721-c348942551d6-metrics-certs podName:965b4f20-8786-4c47-8721-c348942551d6 nodeName:}" failed. No retries permitted until 2026-02-16 11:09:35.168684048 +0000 UTC m=+164.798018233 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/965b4f20-8786-4c47-8721-c348942551d6-metrics-certs") pod "network-metrics-daemon-6v4x7" (UID: "965b4f20-8786-4c47-8721-c348942551d6") : object "openshift-multus"/"metrics-daemon-secret" not registered
Feb 16 11:08:31 crc kubenswrapper[4949]: I0216 11:08:31.235110 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7"
Feb 16 11:08:31 crc kubenswrapper[4949]: E0216 11:08:31.238800 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6"
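[Editor's note] The nestedpendingoperations.go:348 entry refuses retries of the metrics-certs mount for 1m4s after it fails (the secret is not yet registered with the kubelet); 64s is the signature of a doubling backoff after several consecutive failures. A toy model of that schedule; the 1s base and 2m limit are assumptions chosen so the run of failures reaches the logged durationBeforeRetry, not values read from kubelet source:

package main

import (
	"fmt"
	"time"
)

// backoff returns the wait before retry n of a failing operation, doubling
// from base up to limit.
func backoff(n int, base, limit time.Duration) time.Duration {
	d := base
	for i := 0; i < n; i++ {
		d *= 2
		if d > limit {
			return limit
		}
	}
	return d
}

func main() {
	for n := 0; n <= 6; n++ {
		fmt.Printf("failure %d -> wait %s\n", n+1, backoff(n, time.Second, 2*time.Minute))
	}
	// failure 7 -> wait 1m4s, matching "No retries permitted until ...
	// (durationBeforeRetry 1m4s)" above.
}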
Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6" Feb 16 11:08:32 crc kubenswrapper[4949]: I0216 11:08:32.234526 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:08:32 crc kubenswrapper[4949]: E0216 11:08:32.234916 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:08:32 crc kubenswrapper[4949]: I0216 11:08:32.234562 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:08:32 crc kubenswrapper[4949]: I0216 11:08:32.234562 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:08:32 crc kubenswrapper[4949]: E0216 11:08:32.234992 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:08:32 crc kubenswrapper[4949]: E0216 11:08:32.235216 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:08:33 crc kubenswrapper[4949]: I0216 11:08:33.234579 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:08:33 crc kubenswrapper[4949]: E0216 11:08:33.234759 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6" Feb 16 11:08:33 crc kubenswrapper[4949]: I0216 11:08:33.235777 4949 scope.go:117] "RemoveContainer" containerID="1bd5bfd89e4acde3a8bfaf0f75ea8b729c29cb7eb095d232605689ae81cca223" Feb 16 11:08:33 crc kubenswrapper[4949]: E0216 11:08:33.235972 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-gfr2q_openshift-ovn-kubernetes(3f545ae8-1b14-4abd-b4ea-844f6ae7b54d)\"" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" Feb 16 11:08:34 crc kubenswrapper[4949]: I0216 11:08:34.234634 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:08:34 crc kubenswrapper[4949]: E0216 11:08:34.234982 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:08:34 crc kubenswrapper[4949]: I0216 11:08:34.234784 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:08:34 crc kubenswrapper[4949]: I0216 11:08:34.234690 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:08:34 crc kubenswrapper[4949]: E0216 11:08:34.235322 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:08:34 crc kubenswrapper[4949]: E0216 11:08:34.235397 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:08:35 crc kubenswrapper[4949]: I0216 11:08:35.235022 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:08:35 crc kubenswrapper[4949]: E0216 11:08:35.235246 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6" Feb 16 11:08:36 crc kubenswrapper[4949]: I0216 11:08:36.235002 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:08:36 crc kubenswrapper[4949]: I0216 11:08:36.235109 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:08:36 crc kubenswrapper[4949]: I0216 11:08:36.235026 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:08:36 crc kubenswrapper[4949]: E0216 11:08:36.235207 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:08:36 crc kubenswrapper[4949]: E0216 11:08:36.235385 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:08:36 crc kubenswrapper[4949]: E0216 11:08:36.235550 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:08:37 crc kubenswrapper[4949]: I0216 11:08:37.234456 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:08:37 crc kubenswrapper[4949]: E0216 11:08:37.234794 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6" Feb 16 11:08:38 crc kubenswrapper[4949]: I0216 11:08:38.234948 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:08:38 crc kubenswrapper[4949]: I0216 11:08:38.234981 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:08:38 crc kubenswrapper[4949]: E0216 11:08:38.235064 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:08:38 crc kubenswrapper[4949]: I0216 11:08:38.235106 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:08:38 crc kubenswrapper[4949]: E0216 11:08:38.235240 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:08:38 crc kubenswrapper[4949]: E0216 11:08:38.235331 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:08:39 crc kubenswrapper[4949]: I0216 11:08:39.235158 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:08:39 crc kubenswrapper[4949]: E0216 11:08:39.235452 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6" Feb 16 11:08:40 crc kubenswrapper[4949]: I0216 11:08:40.234883 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:08:40 crc kubenswrapper[4949]: I0216 11:08:40.234916 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:08:40 crc kubenswrapper[4949]: E0216 11:08:40.235083 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:08:40 crc kubenswrapper[4949]: I0216 11:08:40.235154 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:08:40 crc kubenswrapper[4949]: E0216 11:08:40.235266 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:08:40 crc kubenswrapper[4949]: E0216 11:08:40.235437 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:08:41 crc kubenswrapper[4949]: I0216 11:08:41.234821 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:08:41 crc kubenswrapper[4949]: E0216 11:08:41.236409 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6" Feb 16 11:08:42 crc kubenswrapper[4949]: I0216 11:08:42.235259 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:08:42 crc kubenswrapper[4949]: E0216 11:08:42.235557 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:08:42 crc kubenswrapper[4949]: I0216 11:08:42.235260 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:08:42 crc kubenswrapper[4949]: E0216 11:08:42.235685 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:08:42 crc kubenswrapper[4949]: I0216 11:08:42.235327 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:08:42 crc kubenswrapper[4949]: E0216 11:08:42.235778 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:08:43 crc kubenswrapper[4949]: I0216 11:08:43.234649 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:08:43 crc kubenswrapper[4949]: E0216 11:08:43.234832 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6" Feb 16 11:08:44 crc kubenswrapper[4949]: I0216 11:08:44.235382 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:08:44 crc kubenswrapper[4949]: I0216 11:08:44.235635 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:08:44 crc kubenswrapper[4949]: I0216 11:08:44.235627 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:08:44 crc kubenswrapper[4949]: E0216 11:08:44.235821 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:08:44 crc kubenswrapper[4949]: E0216 11:08:44.236030 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:08:44 crc kubenswrapper[4949]: E0216 11:08:44.236152 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:08:45 crc kubenswrapper[4949]: I0216 11:08:45.234341 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:08:45 crc kubenswrapper[4949]: E0216 11:08:45.234527 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6" Feb 16 11:08:46 crc kubenswrapper[4949]: I0216 11:08:46.234728 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:08:46 crc kubenswrapper[4949]: E0216 11:08:46.234943 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:08:46 crc kubenswrapper[4949]: I0216 11:08:46.235062 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:08:46 crc kubenswrapper[4949]: E0216 11:08:46.235356 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:08:46 crc kubenswrapper[4949]: I0216 11:08:46.235471 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:08:46 crc kubenswrapper[4949]: E0216 11:08:46.235852 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:08:47 crc kubenswrapper[4949]: I0216 11:08:47.235161 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:08:47 crc kubenswrapper[4949]: E0216 11:08:47.235714 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6" Feb 16 11:08:48 crc kubenswrapper[4949]: I0216 11:08:48.235285 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:08:48 crc kubenswrapper[4949]: I0216 11:08:48.235335 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:08:48 crc kubenswrapper[4949]: I0216 11:08:48.235287 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:08:48 crc kubenswrapper[4949]: E0216 11:08:48.235934 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:08:48 crc kubenswrapper[4949]: E0216 11:08:48.236084 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:08:48 crc kubenswrapper[4949]: E0216 11:08:48.236258 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:08:48 crc kubenswrapper[4949]: I0216 11:08:48.236651 4949 scope.go:117] "RemoveContainer" containerID="1bd5bfd89e4acde3a8bfaf0f75ea8b729c29cb7eb095d232605689ae81cca223" Feb 16 11:08:49 crc kubenswrapper[4949]: I0216 11:08:49.052777 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-6v4x7"] Feb 16 11:08:49 crc kubenswrapper[4949]: I0216 11:08:49.053491 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:08:49 crc kubenswrapper[4949]: E0216 11:08:49.053668 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6" Feb 16 11:08:49 crc kubenswrapper[4949]: I0216 11:08:49.217313 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gfr2q_3f545ae8-1b14-4abd-b4ea-844f6ae7b54d/ovnkube-controller/3.log" Feb 16 11:08:49 crc kubenswrapper[4949]: I0216 11:08:49.222316 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" event={"ID":"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d","Type":"ContainerStarted","Data":"54afa3d971f774813afb88c923abd89863a8403b21bd3fdb6da7cd52da107309"} Feb 16 11:08:49 crc kubenswrapper[4949]: I0216 11:08:49.223162 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:08:49 crc kubenswrapper[4949]: I0216 11:08:49.224591 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-jsmls_3e42a398-f83a-4463-9ab7-4e093e80d744/kube-multus/1.log" Feb 16 11:08:49 crc kubenswrapper[4949]: I0216 11:08:49.225368 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-jsmls_3e42a398-f83a-4463-9ab7-4e093e80d744/kube-multus/0.log" Feb 16 11:08:49 crc kubenswrapper[4949]: I0216 11:08:49.225465 4949 generic.go:334] "Generic (PLEG): container finished" podID="3e42a398-f83a-4463-9ab7-4e093e80d744" containerID="e24db1cec1807881a31cfb6501695acefa892c247aa78d342eb43cb6a9ed32ee" exitCode=1 Feb 16 11:08:49 crc kubenswrapper[4949]: I0216 11:08:49.225523 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-jsmls" event={"ID":"3e42a398-f83a-4463-9ab7-4e093e80d744","Type":"ContainerDied","Data":"e24db1cec1807881a31cfb6501695acefa892c247aa78d342eb43cb6a9ed32ee"} Feb 16 11:08:49 crc kubenswrapper[4949]: I0216 11:08:49.225592 4949 scope.go:117] "RemoveContainer" containerID="9e3e0579c66283751490a28d6f93146aa4b7a000219c17747fe2041a778fe2ff" Feb 16 11:08:49 crc kubenswrapper[4949]: I0216 11:08:49.226132 4949 scope.go:117] "RemoveContainer" containerID="e24db1cec1807881a31cfb6501695acefa892c247aa78d342eb43cb6a9ed32ee" Feb 16 11:08:49 crc kubenswrapper[4949]: E0216 11:08:49.226371 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-jsmls_openshift-multus(3e42a398-f83a-4463-9ab7-4e093e80d744)\"" pod="openshift-multus/multus-jsmls" podUID="3e42a398-f83a-4463-9ab7-4e093e80d744" Feb 16 11:08:49 crc kubenswrapper[4949]: I0216 11:08:49.277255 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" podStartSLOduration=97.277229865 podStartE2EDuration="1m37.277229865s" podCreationTimestamp="2026-02-16 11:07:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:08:49.275400502 +0000 UTC m=+118.904734997" watchObservedRunningTime="2026-02-16 11:08:49.277229865 +0000 UTC m=+118.906564070" Feb 16 11:08:50 crc kubenswrapper[4949]: I0216 11:08:50.234284 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-jsmls_3e42a398-f83a-4463-9ab7-4e093e80d744/kube-multus/1.log" Feb 16 11:08:50 crc kubenswrapper[4949]: I0216 11:08:50.234685 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:08:50 crc kubenswrapper[4949]: I0216 11:08:50.234804 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:08:50 crc kubenswrapper[4949]: I0216 11:08:50.235304 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:08:50 crc kubenswrapper[4949]: I0216 11:08:50.235407 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:08:50 crc kubenswrapper[4949]: E0216 11:08:50.235566 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:08:50 crc kubenswrapper[4949]: E0216 11:08:50.236143 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:08:50 crc kubenswrapper[4949]: E0216 11:08:50.236406 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:08:50 crc kubenswrapper[4949]: E0216 11:08:50.236631 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6" Feb 16 11:08:51 crc kubenswrapper[4949]: E0216 11:08:51.185388 4949 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Feb 16 11:08:51 crc kubenswrapper[4949]: E0216 11:08:51.494480 4949 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Feb 16 11:08:52 crc kubenswrapper[4949]: I0216 11:08:52.234804 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:08:52 crc kubenswrapper[4949]: I0216 11:08:52.234940 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:08:52 crc kubenswrapper[4949]: I0216 11:08:52.235023 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:08:52 crc kubenswrapper[4949]: E0216 11:08:52.235040 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:08:52 crc kubenswrapper[4949]: E0216 11:08:52.235154 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:08:52 crc kubenswrapper[4949]: I0216 11:08:52.234934 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:08:52 crc kubenswrapper[4949]: E0216 11:08:52.235529 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:08:52 crc kubenswrapper[4949]: E0216 11:08:52.235784 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6" Feb 16 11:08:54 crc kubenswrapper[4949]: I0216 11:08:54.234812 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:08:54 crc kubenswrapper[4949]: I0216 11:08:54.234904 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:08:54 crc kubenswrapper[4949]: E0216 11:08:54.235260 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:08:54 crc kubenswrapper[4949]: I0216 11:08:54.234946 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:08:54 crc kubenswrapper[4949]: I0216 11:08:54.234904 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:08:54 crc kubenswrapper[4949]: E0216 11:08:54.235425 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:08:54 crc kubenswrapper[4949]: E0216 11:08:54.235488 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6" Feb 16 11:08:54 crc kubenswrapper[4949]: E0216 11:08:54.235554 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:08:56 crc kubenswrapper[4949]: I0216 11:08:56.234940 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:08:56 crc kubenswrapper[4949]: I0216 11:08:56.235004 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:08:56 crc kubenswrapper[4949]: I0216 11:08:56.235334 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:08:56 crc kubenswrapper[4949]: E0216 11:08:56.235533 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:08:56 crc kubenswrapper[4949]: I0216 11:08:56.235834 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:08:56 crc kubenswrapper[4949]: E0216 11:08:56.235954 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:08:56 crc kubenswrapper[4949]: E0216 11:08:56.236331 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:08:56 crc kubenswrapper[4949]: E0216 11:08:56.236636 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6" Feb 16 11:08:56 crc kubenswrapper[4949]: E0216 11:08:56.496491 4949 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Feb 16 11:08:58 crc kubenswrapper[4949]: I0216 11:08:58.234559 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:08:58 crc kubenswrapper[4949]: I0216 11:08:58.234653 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:08:58 crc kubenswrapper[4949]: I0216 11:08:58.234584 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:08:58 crc kubenswrapper[4949]: I0216 11:08:58.234914 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:08:58 crc kubenswrapper[4949]: E0216 11:08:58.235158 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6" Feb 16 11:08:58 crc kubenswrapper[4949]: E0216 11:08:58.235367 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:08:58 crc kubenswrapper[4949]: E0216 11:08:58.235891 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:08:58 crc kubenswrapper[4949]: E0216 11:08:58.234997 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:09:00 crc kubenswrapper[4949]: I0216 11:09:00.235266 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:09:00 crc kubenswrapper[4949]: E0216 11:09:00.235421 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:09:00 crc kubenswrapper[4949]: I0216 11:09:00.235293 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:09:00 crc kubenswrapper[4949]: E0216 11:09:00.235508 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:09:00 crc kubenswrapper[4949]: I0216 11:09:00.235271 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:09:00 crc kubenswrapper[4949]: I0216 11:09:00.235299 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:09:00 crc kubenswrapper[4949]: E0216 11:09:00.235578 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:09:00 crc kubenswrapper[4949]: E0216 11:09:00.235715 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6" Feb 16 11:09:01 crc kubenswrapper[4949]: E0216 11:09:01.498422 4949 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Feb 16 11:09:02 crc kubenswrapper[4949]: I0216 11:09:02.234674 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:09:02 crc kubenswrapper[4949]: I0216 11:09:02.234745 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:09:02 crc kubenswrapper[4949]: I0216 11:09:02.234754 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:09:02 crc kubenswrapper[4949]: I0216 11:09:02.234686 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:09:02 crc kubenswrapper[4949]: E0216 11:09:02.234936 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6" Feb 16 11:09:02 crc kubenswrapper[4949]: E0216 11:09:02.235285 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:09:02 crc kubenswrapper[4949]: E0216 11:09:02.235418 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:09:02 crc kubenswrapper[4949]: E0216 11:09:02.235514 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:09:03 crc kubenswrapper[4949]: I0216 11:09:03.235841 4949 scope.go:117] "RemoveContainer" containerID="e24db1cec1807881a31cfb6501695acefa892c247aa78d342eb43cb6a9ed32ee" Feb 16 11:09:04 crc kubenswrapper[4949]: I0216 11:09:04.235137 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:09:04 crc kubenswrapper[4949]: I0216 11:09:04.235134 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:09:04 crc kubenswrapper[4949]: E0216 11:09:04.235634 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:09:04 crc kubenswrapper[4949]: I0216 11:09:04.235303 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:09:04 crc kubenswrapper[4949]: I0216 11:09:04.235194 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:09:04 crc kubenswrapper[4949]: E0216 11:09:04.235809 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:09:04 crc kubenswrapper[4949]: E0216 11:09:04.235875 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6" Feb 16 11:09:04 crc kubenswrapper[4949]: E0216 11:09:04.235951 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:09:04 crc kubenswrapper[4949]: I0216 11:09:04.288895 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-jsmls_3e42a398-f83a-4463-9ab7-4e093e80d744/kube-multus/1.log" Feb 16 11:09:04 crc kubenswrapper[4949]: I0216 11:09:04.288973 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-jsmls" event={"ID":"3e42a398-f83a-4463-9ab7-4e093e80d744","Type":"ContainerStarted","Data":"9fc6653cc53f85a17b85cce7ecc6bfbaf249773cba879f6752ec7d6e9b4cf323"} Feb 16 11:09:06 crc kubenswrapper[4949]: I0216 11:09:06.234310 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:09:06 crc kubenswrapper[4949]: I0216 11:09:06.234448 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:09:06 crc kubenswrapper[4949]: E0216 11:09:06.234525 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6v4x7" podUID="965b4f20-8786-4c47-8721-c348942551d6" Feb 16 11:09:06 crc kubenswrapper[4949]: I0216 11:09:06.234369 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:09:06 crc kubenswrapper[4949]: I0216 11:09:06.234603 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:09:06 crc kubenswrapper[4949]: E0216 11:09:06.234759 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 16 11:09:06 crc kubenswrapper[4949]: E0216 11:09:06.234810 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 16 11:09:06 crc kubenswrapper[4949]: E0216 11:09:06.234872 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 16 11:09:08 crc kubenswrapper[4949]: I0216 11:09:08.234865 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:09:08 crc kubenswrapper[4949]: I0216 11:09:08.234919 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 16 11:09:08 crc kubenswrapper[4949]: I0216 11:09:08.235200 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 16 11:09:08 crc kubenswrapper[4949]: I0216 11:09:08.235354 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:09:08 crc kubenswrapper[4949]: I0216 11:09:08.239055 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Feb 16 11:09:08 crc kubenswrapper[4949]: I0216 11:09:08.239326 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Feb 16 11:09:08 crc kubenswrapper[4949]: I0216 11:09:08.239895 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Feb 16 11:09:08 crc kubenswrapper[4949]: I0216 11:09:08.239950 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Feb 16 11:09:08 crc kubenswrapper[4949]: I0216 11:09:08.240469 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Feb 16 11:09:08 crc kubenswrapper[4949]: I0216 11:09:08.241278 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Feb 16 11:09:09 crc kubenswrapper[4949]: I0216 11:09:09.888863 4949 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Feb 16 11:09:09 crc kubenswrapper[4949]: I0216 11:09:09.943034 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-drxpj"] Feb 16 11:09:09 crc kubenswrapper[4949]: I0216 11:09:09.943694 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-drxpj" Feb 16 11:09:09 crc kubenswrapper[4949]: I0216 11:09:09.944343 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-g4khj"] Feb 16 11:09:09 crc kubenswrapper[4949]: I0216 11:09:09.945406 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fkf4v"] Feb 16 11:09:09 crc kubenswrapper[4949]: I0216 11:09:09.946236 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fkf4v" Feb 16 11:09:09 crc kubenswrapper[4949]: I0216 11:09:09.947223 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" Feb 16 11:09:09 crc kubenswrapper[4949]: I0216 11:09:09.947458 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-kwq47"] Feb 16 11:09:09 crc kubenswrapper[4949]: I0216 11:09:09.948008 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-tmlk2"] Feb 16 11:09:09 crc kubenswrapper[4949]: I0216 11:09:09.948536 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-tmlk2" Feb 16 11:09:09 crc kubenswrapper[4949]: I0216 11:09:09.949014 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-7954f5f757-kwq47" Feb 16 11:09:09 crc kubenswrapper[4949]: I0216 11:09:09.955479 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Feb 16 11:09:09 crc kubenswrapper[4949]: I0216 11:09:09.956252 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Feb 16 11:09:09 crc kubenswrapper[4949]: I0216 11:09:09.957728 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-n4llx"] Feb 16 11:09:09 crc kubenswrapper[4949]: I0216 11:09:09.958528 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kq7f7"] Feb 16 11:09:09 crc kubenswrapper[4949]: I0216 11:09:09.958920 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-7r2kv"] Feb 16 11:09:09 crc kubenswrapper[4949]: I0216 11:09:09.959570 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-2j2dm"] Feb 16 11:09:09 crc kubenswrapper[4949]: I0216 11:09:09.959623 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-n4llx" Feb 16 11:09:09 crc kubenswrapper[4949]: I0216 11:09:09.960502 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7r2kv" Feb 16 11:09:09 crc kubenswrapper[4949]: I0216 11:09:09.960859 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-2j2dm" Feb 16 11:09:09 crc kubenswrapper[4949]: I0216 11:09:09.961069 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kq7f7" Feb 16 11:09:09 crc kubenswrapper[4949]: I0216 11:09:09.967010 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Feb 16 11:09:09 crc kubenswrapper[4949]: I0216 11:09:09.967367 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Feb 16 11:09:09 crc kubenswrapper[4949]: I0216 11:09:09.967582 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Feb 16 11:09:09 crc kubenswrapper[4949]: I0216 11:09:09.967931 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Feb 16 11:09:09 crc kubenswrapper[4949]: I0216 11:09:09.968125 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Feb 16 11:09:09 crc kubenswrapper[4949]: I0216 11:09:09.967600 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Feb 16 11:09:09 crc kubenswrapper[4949]: I0216 11:09:09.968576 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Feb 16 11:09:09 crc kubenswrapper[4949]: I0216 11:09:09.968818 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Feb 16 11:09:09 crc kubenswrapper[4949]: I0216 11:09:09.969040 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Feb 16 11:09:09 crc kubenswrapper[4949]: I0216 11:09:09.969208 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Feb 16 11:09:09 crc kubenswrapper[4949]: I0216 11:09:09.969395 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Feb 16 11:09:09 crc kubenswrapper[4949]: I0216 11:09:09.969632 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Feb 16 11:09:09 crc kubenswrapper[4949]: I0216 11:09:09.969801 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Feb 16 11:09:09 crc kubenswrapper[4949]: I0216 11:09:09.969901 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Feb 16 11:09:09 crc kubenswrapper[4949]: I0216 11:09:09.970097 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Feb 16 11:09:09 crc kubenswrapper[4949]: I0216 11:09:09.971815 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-vm5md"] Feb 16 11:09:09 crc kubenswrapper[4949]: I0216 11:09:09.973014 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-vm5md" Feb 16 11:09:09 crc kubenswrapper[4949]: I0216 11:09:09.974099 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Feb 16 11:09:09 crc kubenswrapper[4949]: I0216 11:09:09.992881 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Feb 16 11:09:09 crc kubenswrapper[4949]: I0216 11:09:09.993348 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Feb 16 11:09:09 crc kubenswrapper[4949]: I0216 11:09:09.994705 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Feb 16 11:09:09 crc kubenswrapper[4949]: I0216 11:09:09.996466 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Feb 16 11:09:09 crc kubenswrapper[4949]: I0216 11:09:09.999045 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Feb 16 11:09:09 crc kubenswrapper[4949]: I0216 11:09:09.999474 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.000374 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.000581 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.000941 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.001278 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.001590 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.001865 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.005379 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.005820 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.005829 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.005953 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.006076 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.006198 4949 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-oauth-apiserver"/"etcd-client" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.013503 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.020731 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.021913 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.022133 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.022279 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.022414 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.022549 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.022613 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.022808 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.023005 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.023472 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.027366 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.027635 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.027761 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.028350 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.028448 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.028632 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.028797 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.028994 4949 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.030351 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.030555 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-488fx"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.031430 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.031535 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-488fx" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.031962 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-g4khj\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.032008 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ntsvz\" (UniqueName: \"kubernetes.io/projected/5e66d330-dc75-4a98-9739-589d8df61a68-kube-api-access-ntsvz\") pod \"oauth-openshift-558db77b4-g4khj\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.032040 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-g4khj\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.032073 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c86306bc-5026-49a4-adee-3fd485587e75-trusted-ca\") pod \"console-operator-58897d9998-2j2dm\" (UID: \"c86306bc-5026-49a4-adee-3fd485587e75\") " pod="openshift-console-operator/console-operator-58897d9998-2j2dm" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.032098 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f19591a9-a16b-4d6d-a59d-4d2f825ad7c7-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-n4llx\" (UID: \"f19591a9-a16b-4d6d-a59d-4d2f825ad7c7\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-n4llx" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.032123 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d2690ae1-0168-43e5-aa99-3e926f6979d8-serving-cert\") pod \"controller-manager-879f6c89f-drxpj\" (UID: \"d2690ae1-0168-43e5-aa99-3e926f6979d8\") " 
pod="openshift-controller-manager/controller-manager-879f6c89f-drxpj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.032149 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/73ad7549-c192-4d13-b957-940364f2911b-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-kq7f7\" (UID: \"73ad7549-c192-4d13-b957-940364f2911b\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kq7f7" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.032198 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1114973a-d32b-45b3-bce3-dcf894f30d43-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-fkf4v\" (UID: \"1114973a-d32b-45b3-bce3-dcf894f30d43\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fkf4v" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.032227 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x6665\" (UniqueName: \"kubernetes.io/projected/89406f96-f3ec-4323-bb6a-c42175151f9d-kube-api-access-x6665\") pod \"downloads-7954f5f757-kwq47\" (UID: \"89406f96-f3ec-4323-bb6a-c42175151f9d\") " pod="openshift-console/downloads-7954f5f757-kwq47" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.032252 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n7f8p\" (UniqueName: \"kubernetes.io/projected/a398cced-c30a-4638-96c2-c7fa84672dab-kube-api-access-n7f8p\") pod \"apiserver-76f77b778f-vm5md\" (UID: \"a398cced-c30a-4638-96c2-c7fa84672dab\") " pod="openshift-apiserver/apiserver-76f77b778f-vm5md" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.032276 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-g4khj\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.032304 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/a398cced-c30a-4638-96c2-c7fa84672dab-image-import-ca\") pod \"apiserver-76f77b778f-vm5md\" (UID: \"a398cced-c30a-4638-96c2-c7fa84672dab\") " pod="openshift-apiserver/apiserver-76f77b778f-vm5md" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.032326 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f19591a9-a16b-4d6d-a59d-4d2f825ad7c7-config\") pod \"openshift-apiserver-operator-796bbdcf4f-n4llx\" (UID: \"f19591a9-a16b-4d6d-a59d-4d2f825ad7c7\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-n4llx" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.032353 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c86306bc-5026-49a4-adee-3fd485587e75-serving-cert\") pod \"console-operator-58897d9998-2j2dm\" (UID: \"c86306bc-5026-49a4-adee-3fd485587e75\") " 
pod="openshift-console-operator/console-operator-58897d9998-2j2dm" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.032405 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/a398cced-c30a-4638-96c2-c7fa84672dab-etcd-client\") pod \"apiserver-76f77b778f-vm5md\" (UID: \"a398cced-c30a-4638-96c2-c7fa84672dab\") " pod="openshift-apiserver/apiserver-76f77b778f-vm5md" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.032434 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/73ad7549-c192-4d13-b957-940364f2911b-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-kq7f7\" (UID: \"73ad7549-c192-4d13-b957-940364f2911b\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kq7f7" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.032458 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/df28feb8-3f45-493c-a794-bb64821b0fb0-serving-cert\") pod \"apiserver-7bbb656c7d-7r2kv\" (UID: \"df28feb8-3f45-493c-a794-bb64821b0fb0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7r2kv" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.032482 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/df28feb8-3f45-493c-a794-bb64821b0fb0-encryption-config\") pod \"apiserver-7bbb656c7d-7r2kv\" (UID: \"df28feb8-3f45-493c-a794-bb64821b0fb0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7r2kv" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.032510 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-g4khj\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.032540 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-g4khj\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.032572 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tbv2f\" (UniqueName: \"kubernetes.io/projected/c86306bc-5026-49a4-adee-3fd485587e75-kube-api-access-tbv2f\") pod \"console-operator-58897d9998-2j2dm\" (UID: \"c86306bc-5026-49a4-adee-3fd485587e75\") " pod="openshift-console-operator/console-operator-58897d9998-2j2dm" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.032615 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qp7d4\" (UniqueName: \"kubernetes.io/projected/df28feb8-3f45-493c-a794-bb64821b0fb0-kube-api-access-qp7d4\") pod \"apiserver-7bbb656c7d-7r2kv\" (UID: \"df28feb8-3f45-493c-a794-bb64821b0fb0\") " 
pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7r2kv" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.032643 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bxdnr\" (UniqueName: \"kubernetes.io/projected/1114973a-d32b-45b3-bce3-dcf894f30d43-kube-api-access-bxdnr\") pod \"cluster-image-registry-operator-dc59b4c8b-fkf4v\" (UID: \"1114973a-d32b-45b3-bce3-dcf894f30d43\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fkf4v" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.032677 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/5e66d330-dc75-4a98-9739-589d8df61a68-audit-policies\") pod \"oauth-openshift-558db77b4-g4khj\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.032705 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a398cced-c30a-4638-96c2-c7fa84672dab-serving-cert\") pod \"apiserver-76f77b778f-vm5md\" (UID: \"a398cced-c30a-4638-96c2-c7fa84672dab\") " pod="openshift-apiserver/apiserver-76f77b778f-vm5md" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.032731 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/a398cced-c30a-4638-96c2-c7fa84672dab-audit\") pod \"apiserver-76f77b778f-vm5md\" (UID: \"a398cced-c30a-4638-96c2-c7fa84672dab\") " pod="openshift-apiserver/apiserver-76f77b778f-vm5md" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.032758 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/1114973a-d32b-45b3-bce3-dcf894f30d43-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-fkf4v\" (UID: \"1114973a-d32b-45b3-bce3-dcf894f30d43\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fkf4v" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.032789 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a398cced-c30a-4638-96c2-c7fa84672dab-trusted-ca-bundle\") pod \"apiserver-76f77b778f-vm5md\" (UID: \"a398cced-c30a-4638-96c2-c7fa84672dab\") " pod="openshift-apiserver/apiserver-76f77b778f-vm5md" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.032814 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/df28feb8-3f45-493c-a794-bb64821b0fb0-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-7r2kv\" (UID: \"df28feb8-3f45-493c-a794-bb64821b0fb0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7r2kv" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.032840 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/ce076bd0-01e6-4ebf-a060-385ae309169a-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-tmlk2\" (UID: \"ce076bd0-01e6-4ebf-a060-385ae309169a\") " 
pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-tmlk2" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.032869 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/5e66d330-dc75-4a98-9739-589d8df61a68-audit-dir\") pod \"oauth-openshift-558db77b4-g4khj\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.032899 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6lfbm\" (UniqueName: \"kubernetes.io/projected/d2690ae1-0168-43e5-aa99-3e926f6979d8-kube-api-access-6lfbm\") pod \"controller-manager-879f6c89f-drxpj\" (UID: \"d2690ae1-0168-43e5-aa99-3e926f6979d8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-drxpj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.032928 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/df28feb8-3f45-493c-a794-bb64821b0fb0-etcd-client\") pod \"apiserver-7bbb656c7d-7r2kv\" (UID: \"df28feb8-3f45-493c-a794-bb64821b0fb0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7r2kv" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.032955 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w25sm\" (UniqueName: \"kubernetes.io/projected/f19591a9-a16b-4d6d-a59d-4d2f825ad7c7-kube-api-access-w25sm\") pod \"openshift-apiserver-operator-796bbdcf4f-n4llx\" (UID: \"f19591a9-a16b-4d6d-a59d-4d2f825ad7c7\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-n4llx" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.032979 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/1114973a-d32b-45b3-bce3-dcf894f30d43-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-fkf4v\" (UID: \"1114973a-d32b-45b3-bce3-dcf894f30d43\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fkf4v" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.033005 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8rqdh\" (UniqueName: \"kubernetes.io/projected/ce076bd0-01e6-4ebf-a060-385ae309169a-kube-api-access-8rqdh\") pod \"cluster-samples-operator-665b6dd947-tmlk2\" (UID: \"ce076bd0-01e6-4ebf-a060-385ae309169a\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-tmlk2" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.033030 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/df28feb8-3f45-493c-a794-bb64821b0fb0-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-7r2kv\" (UID: \"df28feb8-3f45-493c-a794-bb64821b0fb0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7r2kv" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.033056 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/d2690ae1-0168-43e5-aa99-3e926f6979d8-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-drxpj\" (UID: 
\"d2690ae1-0168-43e5-aa99-3e926f6979d8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-drxpj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.033084 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-g4khj\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.033113 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-g4khj\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.033141 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cf42l\" (UniqueName: \"kubernetes.io/projected/73ad7549-c192-4d13-b957-940364f2911b-kube-api-access-cf42l\") pod \"openshift-controller-manager-operator-756b6f6bc6-kq7f7\" (UID: \"73ad7549-c192-4d13-b957-940364f2911b\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kq7f7" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.033220 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-g4khj\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.033250 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/a398cced-c30a-4638-96c2-c7fa84672dab-node-pullsecrets\") pod \"apiserver-76f77b778f-vm5md\" (UID: \"a398cced-c30a-4638-96c2-c7fa84672dab\") " pod="openshift-apiserver/apiserver-76f77b778f-vm5md" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.033278 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-g4khj\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.033305 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/df28feb8-3f45-493c-a794-bb64821b0fb0-audit-policies\") pod \"apiserver-7bbb656c7d-7r2kv\" (UID: \"df28feb8-3f45-493c-a794-bb64821b0fb0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7r2kv" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.033333 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/c86306bc-5026-49a4-adee-3fd485587e75-config\") pod \"console-operator-58897d9998-2j2dm\" (UID: \"c86306bc-5026-49a4-adee-3fd485587e75\") " pod="openshift-console-operator/console-operator-58897d9998-2j2dm" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.033356 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a398cced-c30a-4638-96c2-c7fa84672dab-config\") pod \"apiserver-76f77b778f-vm5md\" (UID: \"a398cced-c30a-4638-96c2-c7fa84672dab\") " pod="openshift-apiserver/apiserver-76f77b778f-vm5md" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.033380 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d2690ae1-0168-43e5-aa99-3e926f6979d8-config\") pod \"controller-manager-879f6c89f-drxpj\" (UID: \"d2690ae1-0168-43e5-aa99-3e926f6979d8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-drxpj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.033404 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-g4khj\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.033441 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/a398cced-c30a-4638-96c2-c7fa84672dab-encryption-config\") pod \"apiserver-76f77b778f-vm5md\" (UID: \"a398cced-c30a-4638-96c2-c7fa84672dab\") " pod="openshift-apiserver/apiserver-76f77b778f-vm5md" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.033470 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-g4khj\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.033503 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/a398cced-c30a-4638-96c2-c7fa84672dab-etcd-serving-ca\") pod \"apiserver-76f77b778f-vm5md\" (UID: \"a398cced-c30a-4638-96c2-c7fa84672dab\") " pod="openshift-apiserver/apiserver-76f77b778f-vm5md" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.033524 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a398cced-c30a-4638-96c2-c7fa84672dab-audit-dir\") pod \"apiserver-76f77b778f-vm5md\" (UID: \"a398cced-c30a-4638-96c2-c7fa84672dab\") " pod="openshift-apiserver/apiserver-76f77b778f-vm5md" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.033546 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/df28feb8-3f45-493c-a794-bb64821b0fb0-audit-dir\") pod \"apiserver-7bbb656c7d-7r2kv\" (UID: 
\"df28feb8-3f45-493c-a794-bb64821b0fb0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7r2kv" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.033572 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d2690ae1-0168-43e5-aa99-3e926f6979d8-client-ca\") pod \"controller-manager-879f6c89f-drxpj\" (UID: \"d2690ae1-0168-43e5-aa99-3e926f6979d8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-drxpj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.033938 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.034130 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.034277 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.034845 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.040486 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-5nxhr"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.041381 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-5nxhr" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.042158 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.043788 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.045655 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.046117 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.048768 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.049742 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.049891 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.050132 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.050328 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.050641 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Feb 16 11:09:10 crc 
kubenswrapper[4949]: I0216 11:09:10.050782 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.050939 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.050953 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.051032 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-p2jx5"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.051056 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.051167 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.051624 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p2jx5" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.058400 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.062795 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.062997 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.063084 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.063400 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.063533 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.063619 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.063404 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.063949 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.064129 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.076536 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-tt99q"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.079047 4949 
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.083778 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-kzx7x"]
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.084556 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cngx5"]
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.086266 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.086081 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-tt99q"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.087890 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cngx5"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.093614 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.093803 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.094084 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.096664 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.097327 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.097401 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.097536 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.105086 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.105666 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.110512 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.111145 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.111438 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-j5268"]
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.112374 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-j5268"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.113003 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-cfspd"]
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.113476 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-rk65d"]
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.114106 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-rk65d"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.114378 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-cfspd"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.118020 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-mwmrl"]
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.118838 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-82fv4"]
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.119462 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ld5mv"]
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.119927 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ld5mv"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.120510 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-mwmrl"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.120817 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-82fv4"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.120925 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.121004 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.121271 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.121468 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.124048 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-6p46s"]
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.124968 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-6p46s"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.125369 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-5xfq9"]
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.125897 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-5xfq9"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.128225 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-t47hp"]
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.128940 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-l9xzk"]
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.129023 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.129338 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-t47hp"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.131624 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-5xpfr"]
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.132045 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-5xpfr"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.132228 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-l9xzk"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.134392 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-txm47\" (UniqueName: \"kubernetes.io/projected/6360e7de-ae30-4cfc-8450-a0eaff573c5c-kube-api-access-txm47\") pod \"machine-config-operator-74547568cd-82fv4\" (UID: \"6360e7de-ae30-4cfc-8450-a0eaff573c5c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-82fv4"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.134446 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c86306bc-5026-49a4-adee-3fd485587e75-serving-cert\") pod \"console-operator-58897d9998-2j2dm\" (UID: \"c86306bc-5026-49a4-adee-3fd485587e75\") " pod="openshift-console-operator/console-operator-58897d9998-2j2dm"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.134469 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/a398cced-c30a-4638-96c2-c7fa84672dab-image-import-ca\") pod \"apiserver-76f77b778f-vm5md\" (UID: \"a398cced-c30a-4638-96c2-c7fa84672dab\") " pod="openshift-apiserver/apiserver-76f77b778f-vm5md"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.134484 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f19591a9-a16b-4d6d-a59d-4d2f825ad7c7-config\") pod \"openshift-apiserver-operator-796bbdcf4f-n4llx\" (UID: \"f19591a9-a16b-4d6d-a59d-4d2f825ad7c7\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-n4llx"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.134524 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/b292c120-8566-4d1a-b522-1739f12db3ab-etcd-service-ca\") pod \"etcd-operator-b45778765-mwmrl\" (UID: \"b292c120-8566-4d1a-b522-1739f12db3ab\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mwmrl"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.134547 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/048dcd85-c085-40e8-b952-d97abe29ac36-service-ca-bundle\") pod \"router-default-5444994796-t47hp\" (UID: \"048dcd85-c085-40e8-b952-d97abe29ac36\") " pod="openshift-ingress/router-default-5444994796-t47hp"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.134623 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/a398cced-c30a-4638-96c2-c7fa84672dab-etcd-client\") pod \"apiserver-76f77b778f-vm5md\" (UID: \"a398cced-c30a-4638-96c2-c7fa84672dab\") " pod="openshift-apiserver/apiserver-76f77b778f-vm5md"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.134643 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/73ad7549-c192-4d13-b957-940364f2911b-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-kq7f7\" (UID: \"73ad7549-c192-4d13-b957-940364f2911b\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kq7f7"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.134661 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/df28feb8-3f45-493c-a794-bb64821b0fb0-serving-cert\") pod \"apiserver-7bbb656c7d-7r2kv\" (UID: \"df28feb8-3f45-493c-a794-bb64821b0fb0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7r2kv"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.134711 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/df28feb8-3f45-493c-a794-bb64821b0fb0-encryption-config\") pod \"apiserver-7bbb656c7d-7r2kv\" (UID: \"df28feb8-3f45-493c-a794-bb64821b0fb0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7r2kv"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.134732 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-g4khj\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " pod="openshift-authentication/oauth-openshift-558db77b4-g4khj"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.134777 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4c93693d-d1ae-41d1-9ed6-744dc03aa0b1-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-5nxhr\" (UID: \"4c93693d-d1ae-41d1-9ed6-744dc03aa0b1\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5nxhr"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.134795 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t7pd6\" (UniqueName: \"kubernetes.io/projected/0492d421-806b-48e4-8a97-3032888e370e-kube-api-access-t7pd6\") pod \"route-controller-manager-6576b87f9c-p2jx5\" (UID: \"0492d421-806b-48e4-8a97-3032888e370e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p2jx5"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.134813 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/5a74756d-ce49-4ba1-9dcb-fedee6464166-auth-proxy-config\") pod \"machine-approver-56656f9798-488fx\" (UID: \"5a74756d-ce49-4ba1-9dcb-fedee6464166\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-488fx"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.134857 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tbv2f\" (UniqueName: \"kubernetes.io/projected/c86306bc-5026-49a4-adee-3fd485587e75-kube-api-access-tbv2f\") pod \"console-operator-58897d9998-2j2dm\" (UID: \"c86306bc-5026-49a4-adee-3fd485587e75\") " pod="openshift-console-operator/console-operator-58897d9998-2j2dm"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.134879 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-g4khj\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " pod="openshift-authentication/oauth-openshift-558db77b4-g4khj"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.134898 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/30431645-dce8-4da3-a237-1ec14d3b2c73-metrics-tls\") pod \"dns-operator-744455d44c-rk65d\" (UID: \"30431645-dce8-4da3-a237-1ec14d3b2c73\") " pod="openshift-dns-operator/dns-operator-744455d44c-rk65d"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.134943 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/468461de-4a56-47b0-a5a9-cf6e51b6de47-console-serving-cert\") pod \"console-f9d7485db-5xfq9\" (UID: \"468461de-4a56-47b0-a5a9-cf6e51b6de47\") " pod="openshift-console/console-f9d7485db-5xfq9"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.135015 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b292c120-8566-4d1a-b522-1739f12db3ab-config\") pod \"etcd-operator-b45778765-mwmrl\" (UID: \"b292c120-8566-4d1a-b522-1739f12db3ab\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mwmrl"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.135034 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/b292c120-8566-4d1a-b522-1739f12db3ab-etcd-ca\") pod \"etcd-operator-b45778765-mwmrl\" (UID: \"b292c120-8566-4d1a-b522-1739f12db3ab\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mwmrl"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.135063 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qp7d4\" (UniqueName: \"kubernetes.io/projected/df28feb8-3f45-493c-a794-bb64821b0fb0-kube-api-access-qp7d4\") pod \"apiserver-7bbb656c7d-7r2kv\" (UID: \"df28feb8-3f45-493c-a794-bb64821b0fb0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7r2kv"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.135107 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bxdnr\" (UniqueName: \"kubernetes.io/projected/1114973a-d32b-45b3-bce3-dcf894f30d43-kube-api-access-bxdnr\") pod \"cluster-image-registry-operator-dc59b4c8b-fkf4v\" (UID: \"1114973a-d32b-45b3-bce3-dcf894f30d43\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fkf4v"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.135130 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/6360e7de-ae30-4cfc-8450-a0eaff573c5c-auth-proxy-config\") pod \"machine-config-operator-74547568cd-82fv4\" (UID: \"6360e7de-ae30-4cfc-8450-a0eaff573c5c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-82fv4"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.135149 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0492d421-806b-48e4-8a97-3032888e370e-serving-cert\") pod \"route-controller-manager-6576b87f9c-p2jx5\" (UID: \"0492d421-806b-48e4-8a97-3032888e370e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p2jx5"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.135205 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/5e66d330-dc75-4a98-9739-589d8df61a68-audit-policies\") pod \"oauth-openshift-558db77b4-g4khj\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " pod="openshift-authentication/oauth-openshift-558db77b4-g4khj"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.135226 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a398cced-c30a-4638-96c2-c7fa84672dab-serving-cert\") pod \"apiserver-76f77b778f-vm5md\" (UID: \"a398cced-c30a-4638-96c2-c7fa84672dab\") " pod="openshift-apiserver/apiserver-76f77b778f-vm5md"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.135273 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/a398cced-c30a-4638-96c2-c7fa84672dab-audit\") pod \"apiserver-76f77b778f-vm5md\" (UID: \"a398cced-c30a-4638-96c2-c7fa84672dab\") " pod="openshift-apiserver/apiserver-76f77b778f-vm5md"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.135300 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4c93693d-d1ae-41d1-9ed6-744dc03aa0b1-serving-cert\") pod \"authentication-operator-69f744f599-5nxhr\" (UID: \"4c93693d-d1ae-41d1-9ed6-744dc03aa0b1\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5nxhr"
Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.135365 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/afc70085-b399-4f43-a311-0e38471ad055-kube-api-access\") pod 
\"kube-controller-manager-operator-78b949d7b-j5268\" (UID: \"afc70085-b399-4f43-a311-0e38471ad055\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-j5268" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.135390 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/1114973a-d32b-45b3-bce3-dcf894f30d43-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-fkf4v\" (UID: \"1114973a-d32b-45b3-bce3-dcf894f30d43\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fkf4v" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.135436 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4d4l8\" (UniqueName: \"kubernetes.io/projected/048dcd85-c085-40e8-b952-d97abe29ac36-kube-api-access-4d4l8\") pod \"router-default-5444994796-t47hp\" (UID: \"048dcd85-c085-40e8-b952-d97abe29ac36\") " pod="openshift-ingress/router-default-5444994796-t47hp" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.135460 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a398cced-c30a-4638-96c2-c7fa84672dab-trusted-ca-bundle\") pod \"apiserver-76f77b778f-vm5md\" (UID: \"a398cced-c30a-4638-96c2-c7fa84672dab\") " pod="openshift-apiserver/apiserver-76f77b778f-vm5md" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.135478 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/df28feb8-3f45-493c-a794-bb64821b0fb0-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-7r2kv\" (UID: \"df28feb8-3f45-493c-a794-bb64821b0fb0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7r2kv" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.135522 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/ce076bd0-01e6-4ebf-a060-385ae309169a-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-tmlk2\" (UID: \"ce076bd0-01e6-4ebf-a060-385ae309169a\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-tmlk2" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.135543 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/b292c120-8566-4d1a-b522-1739f12db3ab-etcd-client\") pod \"etcd-operator-b45778765-mwmrl\" (UID: \"b292c120-8566-4d1a-b522-1739f12db3ab\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mwmrl" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.135560 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/048dcd85-c085-40e8-b952-d97abe29ac36-stats-auth\") pod \"router-default-5444994796-t47hp\" (UID: \"048dcd85-c085-40e8-b952-d97abe29ac36\") " pod="openshift-ingress/router-default-5444994796-t47hp" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.135603 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2h9bv\" (UniqueName: \"kubernetes.io/projected/5a74756d-ce49-4ba1-9dcb-fedee6464166-kube-api-access-2h9bv\") pod \"machine-approver-56656f9798-488fx\" (UID: 
\"5a74756d-ce49-4ba1-9dcb-fedee6464166\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-488fx" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.135621 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/5e66d330-dc75-4a98-9739-589d8df61a68-audit-dir\") pod \"oauth-openshift-558db77b4-g4khj\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.135639 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/afc70085-b399-4f43-a311-0e38471ad055-config\") pod \"kube-controller-manager-operator-78b949d7b-j5268\" (UID: \"afc70085-b399-4f43-a311-0e38471ad055\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-j5268" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.135683 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6lfbm\" (UniqueName: \"kubernetes.io/projected/d2690ae1-0168-43e5-aa99-3e926f6979d8-kube-api-access-6lfbm\") pod \"controller-manager-879f6c89f-drxpj\" (UID: \"d2690ae1-0168-43e5-aa99-3e926f6979d8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-drxpj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.135702 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0492d421-806b-48e4-8a97-3032888e370e-config\") pod \"route-controller-manager-6576b87f9c-p2jx5\" (UID: \"0492d421-806b-48e4-8a97-3032888e370e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p2jx5" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.135721 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/468461de-4a56-47b0-a5a9-cf6e51b6de47-console-oauth-config\") pod \"console-f9d7485db-5xfq9\" (UID: \"468461de-4a56-47b0-a5a9-cf6e51b6de47\") " pod="openshift-console/console-f9d7485db-5xfq9" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.135762 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/468461de-4a56-47b0-a5a9-cf6e51b6de47-service-ca\") pod \"console-f9d7485db-5xfq9\" (UID: \"468461de-4a56-47b0-a5a9-cf6e51b6de47\") " pod="openshift-console/console-f9d7485db-5xfq9" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.135780 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/df28feb8-3f45-493c-a794-bb64821b0fb0-etcd-client\") pod \"apiserver-7bbb656c7d-7r2kv\" (UID: \"df28feb8-3f45-493c-a794-bb64821b0fb0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7r2kv" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.135796 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w25sm\" (UniqueName: \"kubernetes.io/projected/f19591a9-a16b-4d6d-a59d-4d2f825ad7c7-kube-api-access-w25sm\") pod \"openshift-apiserver-operator-796bbdcf4f-n4llx\" (UID: \"f19591a9-a16b-4d6d-a59d-4d2f825ad7c7\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-n4llx" Feb 16 11:09:10 crc 
kubenswrapper[4949]: I0216 11:09:10.135838 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/1114973a-d32b-45b3-bce3-dcf894f30d43-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-fkf4v\" (UID: \"1114973a-d32b-45b3-bce3-dcf894f30d43\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fkf4v" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.135858 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8rqdh\" (UniqueName: \"kubernetes.io/projected/ce076bd0-01e6-4ebf-a060-385ae309169a-kube-api-access-8rqdh\") pod \"cluster-samples-operator-665b6dd947-tmlk2\" (UID: \"ce076bd0-01e6-4ebf-a060-385ae309169a\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-tmlk2" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.135878 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/468461de-4a56-47b0-a5a9-cf6e51b6de47-trusted-ca-bundle\") pod \"console-f9d7485db-5xfq9\" (UID: \"468461de-4a56-47b0-a5a9-cf6e51b6de47\") " pod="openshift-console/console-f9d7485db-5xfq9" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.135921 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-85hth\" (UniqueName: \"kubernetes.io/projected/468461de-4a56-47b0-a5a9-cf6e51b6de47-kube-api-access-85hth\") pod \"console-f9d7485db-5xfq9\" (UID: \"468461de-4a56-47b0-a5a9-cf6e51b6de47\") " pod="openshift-console/console-f9d7485db-5xfq9" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.135940 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5a74756d-ce49-4ba1-9dcb-fedee6464166-config\") pod \"machine-approver-56656f9798-488fx\" (UID: \"5a74756d-ce49-4ba1-9dcb-fedee6464166\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-488fx" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.135956 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b60d988a-5b8e-4fdd-94f4-b61faaf6b5a5-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-ld5mv\" (UID: \"b60d988a-5b8e-4fdd-94f4-b61faaf6b5a5\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ld5mv" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.135999 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/df28feb8-3f45-493c-a794-bb64821b0fb0-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-7r2kv\" (UID: \"df28feb8-3f45-493c-a794-bb64821b0fb0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7r2kv" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.136017 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/d2690ae1-0168-43e5-aa99-3e926f6979d8-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-drxpj\" (UID: \"d2690ae1-0168-43e5-aa99-3e926f6979d8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-drxpj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.136037 4949 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-g4khj\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.136079 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-g4khj\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.136099 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0492d421-806b-48e4-8a97-3032888e370e-client-ca\") pod \"route-controller-manager-6576b87f9c-p2jx5\" (UID: \"0492d421-806b-48e4-8a97-3032888e370e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p2jx5" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.136116 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cf42l\" (UniqueName: \"kubernetes.io/projected/73ad7549-c192-4d13-b957-940364f2911b-kube-api-access-cf42l\") pod \"openshift-controller-manager-operator-756b6f6bc6-kq7f7\" (UID: \"73ad7549-c192-4d13-b957-940364f2911b\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kq7f7" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.136157 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-g4khj\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.136216 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6360e7de-ae30-4cfc-8450-a0eaff573c5c-images\") pod \"machine-config-operator-74547568cd-82fv4\" (UID: \"6360e7de-ae30-4cfc-8450-a0eaff573c5c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-82fv4" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.136233 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c93693d-d1ae-41d1-9ed6-744dc03aa0b1-config\") pod \"authentication-operator-69f744f599-5nxhr\" (UID: \"4c93693d-d1ae-41d1-9ed6-744dc03aa0b1\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5nxhr" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.136251 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/a398cced-c30a-4638-96c2-c7fa84672dab-node-pullsecrets\") pod \"apiserver-76f77b778f-vm5md\" (UID: \"a398cced-c30a-4638-96c2-c7fa84672dab\") " pod="openshift-apiserver/apiserver-76f77b778f-vm5md" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.136293 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-g4khj\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.136311 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/713cc598-c331-4416-9826-0418c542a29b-trusted-ca\") pod \"ingress-operator-5b745b69d9-6p46s\" (UID: \"713cc598-c331-4416-9826-0418c542a29b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-6p46s" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.136329 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5v7xj\" (UniqueName: \"kubernetes.io/projected/4c93693d-d1ae-41d1-9ed6-744dc03aa0b1-kube-api-access-5v7xj\") pod \"authentication-operator-69f744f599-5nxhr\" (UID: \"4c93693d-d1ae-41d1-9ed6-744dc03aa0b1\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5nxhr" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.136374 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/df28feb8-3f45-493c-a794-bb64821b0fb0-audit-policies\") pod \"apiserver-7bbb656c7d-7r2kv\" (UID: \"df28feb8-3f45-493c-a794-bb64821b0fb0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7r2kv" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.136394 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b8314a72-2893-476e-849e-33cd71b1ebd5-serving-cert\") pod \"openshift-config-operator-7777fb866f-tt99q\" (UID: \"b8314a72-2893-476e-849e-33cd71b1ebd5\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-tt99q" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.136417 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c86306bc-5026-49a4-adee-3fd485587e75-config\") pod \"console-operator-58897d9998-2j2dm\" (UID: \"c86306bc-5026-49a4-adee-3fd485587e75\") " pod="openshift-console-operator/console-operator-58897d9998-2j2dm" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.136474 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a398cced-c30a-4638-96c2-c7fa84672dab-config\") pod \"apiserver-76f77b778f-vm5md\" (UID: \"a398cced-c30a-4638-96c2-c7fa84672dab\") " pod="openshift-apiserver/apiserver-76f77b778f-vm5md" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.136500 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9svvw\" (UniqueName: \"kubernetes.io/projected/b292c120-8566-4d1a-b522-1739f12db3ab-kube-api-access-9svvw\") pod \"etcd-operator-b45778765-mwmrl\" (UID: \"b292c120-8566-4d1a-b522-1739f12db3ab\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mwmrl" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.136553 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d2690ae1-0168-43e5-aa99-3e926f6979d8-config\") pod \"controller-manager-879f6c89f-drxpj\" (UID: 
\"d2690ae1-0168-43e5-aa99-3e926f6979d8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-drxpj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.136572 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-g4khj\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.136612 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/6360e7de-ae30-4cfc-8450-a0eaff573c5c-proxy-tls\") pod \"machine-config-operator-74547568cd-82fv4\" (UID: \"6360e7de-ae30-4cfc-8450-a0eaff573c5c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-82fv4" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.136631 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/048dcd85-c085-40e8-b952-d97abe29ac36-default-certificate\") pod \"router-default-5444994796-t47hp\" (UID: \"048dcd85-c085-40e8-b952-d97abe29ac36\") " pod="openshift-ingress/router-default-5444994796-t47hp" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.136648 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s4f7n\" (UniqueName: \"kubernetes.io/projected/b8314a72-2893-476e-849e-33cd71b1ebd5-kube-api-access-s4f7n\") pod \"openshift-config-operator-7777fb866f-tt99q\" (UID: \"b8314a72-2893-476e-849e-33cd71b1ebd5\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-tt99q" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.136703 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/a398cced-c30a-4638-96c2-c7fa84672dab-encryption-config\") pod \"apiserver-76f77b778f-vm5md\" (UID: \"a398cced-c30a-4638-96c2-c7fa84672dab\") " pod="openshift-apiserver/apiserver-76f77b778f-vm5md" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.136722 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-g4khj\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.136740 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/713cc598-c331-4416-9826-0418c542a29b-metrics-tls\") pod \"ingress-operator-5b745b69d9-6p46s\" (UID: \"713cc598-c331-4416-9826-0418c542a29b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-6p46s" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.136781 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/468461de-4a56-47b0-a5a9-cf6e51b6de47-console-config\") pod \"console-f9d7485db-5xfq9\" (UID: \"468461de-4a56-47b0-a5a9-cf6e51b6de47\") " 
pod="openshift-console/console-f9d7485db-5xfq9" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.136799 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/048dcd85-c085-40e8-b952-d97abe29ac36-metrics-certs\") pod \"router-default-5444994796-t47hp\" (UID: \"048dcd85-c085-40e8-b952-d97abe29ac36\") " pod="openshift-ingress/router-default-5444994796-t47hp" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.136827 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/a398cced-c30a-4638-96c2-c7fa84672dab-etcd-serving-ca\") pod \"apiserver-76f77b778f-vm5md\" (UID: \"a398cced-c30a-4638-96c2-c7fa84672dab\") " pod="openshift-apiserver/apiserver-76f77b778f-vm5md" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.136862 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a398cced-c30a-4638-96c2-c7fa84672dab-audit-dir\") pod \"apiserver-76f77b778f-vm5md\" (UID: \"a398cced-c30a-4638-96c2-c7fa84672dab\") " pod="openshift-apiserver/apiserver-76f77b778f-vm5md" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.136881 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/713cc598-c331-4416-9826-0418c542a29b-bound-sa-token\") pod \"ingress-operator-5b745b69d9-6p46s\" (UID: \"713cc598-c331-4416-9826-0418c542a29b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-6p46s" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.136926 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/df28feb8-3f45-493c-a794-bb64821b0fb0-audit-dir\") pod \"apiserver-7bbb656c7d-7r2kv\" (UID: \"df28feb8-3f45-493c-a794-bb64821b0fb0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7r2kv" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.136946 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d2690ae1-0168-43e5-aa99-3e926f6979d8-client-ca\") pod \"controller-manager-879f6c89f-drxpj\" (UID: \"d2690ae1-0168-43e5-aa99-3e926f6979d8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-drxpj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.136964 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4c93693d-d1ae-41d1-9ed6-744dc03aa0b1-service-ca-bundle\") pod \"authentication-operator-69f744f599-5nxhr\" (UID: \"4c93693d-d1ae-41d1-9ed6-744dc03aa0b1\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5nxhr" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.136998 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b292c120-8566-4d1a-b522-1739f12db3ab-serving-cert\") pod \"etcd-operator-b45778765-mwmrl\" (UID: \"b292c120-8566-4d1a-b522-1739f12db3ab\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mwmrl" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.137021 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: 
\"kubernetes.io/configmap/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-g4khj\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.137040 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ntsvz\" (UniqueName: \"kubernetes.io/projected/5e66d330-dc75-4a98-9739-589d8df61a68-kube-api-access-ntsvz\") pod \"oauth-openshift-558db77b4-g4khj\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.137057 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b60d988a-5b8e-4fdd-94f4-b61faaf6b5a5-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-ld5mv\" (UID: \"b60d988a-5b8e-4fdd-94f4-b61faaf6b5a5\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ld5mv" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.137097 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5fqw5\" (UniqueName: \"kubernetes.io/projected/30431645-dce8-4da3-a237-1ec14d3b2c73-kube-api-access-5fqw5\") pod \"dns-operator-744455d44c-rk65d\" (UID: \"30431645-dce8-4da3-a237-1ec14d3b2c73\") " pod="openshift-dns-operator/dns-operator-744455d44c-rk65d" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.137114 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/afc70085-b399-4f43-a311-0e38471ad055-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-j5268\" (UID: \"afc70085-b399-4f43-a311-0e38471ad055\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-j5268" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.137152 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/5a74756d-ce49-4ba1-9dcb-fedee6464166-machine-approver-tls\") pod \"machine-approver-56656f9798-488fx\" (UID: \"5a74756d-ce49-4ba1-9dcb-fedee6464166\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-488fx" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.137200 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c86306bc-5026-49a4-adee-3fd485587e75-trusted-ca\") pod \"console-operator-58897d9998-2j2dm\" (UID: \"c86306bc-5026-49a4-adee-3fd485587e75\") " pod="openshift-console-operator/console-operator-58897d9998-2j2dm" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.137221 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f19591a9-a16b-4d6d-a59d-4d2f825ad7c7-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-n4llx\" (UID: \"f19591a9-a16b-4d6d-a59d-4d2f825ad7c7\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-n4llx" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.137238 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/d2690ae1-0168-43e5-aa99-3e926f6979d8-serving-cert\") pod \"controller-manager-879f6c89f-drxpj\" (UID: \"d2690ae1-0168-43e5-aa99-3e926f6979d8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-drxpj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.137278 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-g4khj\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.137304 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/73ad7549-c192-4d13-b957-940364f2911b-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-kq7f7\" (UID: \"73ad7549-c192-4d13-b957-940364f2911b\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kq7f7" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.137324 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1114973a-d32b-45b3-bce3-dcf894f30d43-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-fkf4v\" (UID: \"1114973a-d32b-45b3-bce3-dcf894f30d43\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fkf4v" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.137360 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/468461de-4a56-47b0-a5a9-cf6e51b6de47-oauth-serving-cert\") pod \"console-f9d7485db-5xfq9\" (UID: \"468461de-4a56-47b0-a5a9-cf6e51b6de47\") " pod="openshift-console/console-f9d7485db-5xfq9" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.137381 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x6665\" (UniqueName: \"kubernetes.io/projected/89406f96-f3ec-4323-bb6a-c42175151f9d-kube-api-access-x6665\") pod \"downloads-7954f5f757-kwq47\" (UID: \"89406f96-f3ec-4323-bb6a-c42175151f9d\") " pod="openshift-console/downloads-7954f5f757-kwq47" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.137399 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b60d988a-5b8e-4fdd-94f4-b61faaf6b5a5-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-ld5mv\" (UID: \"b60d988a-5b8e-4fdd-94f4-b61faaf6b5a5\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ld5mv" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.137440 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n7f8p\" (UniqueName: \"kubernetes.io/projected/a398cced-c30a-4638-96c2-c7fa84672dab-kube-api-access-n7f8p\") pod \"apiserver-76f77b778f-vm5md\" (UID: \"a398cced-c30a-4638-96c2-c7fa84672dab\") " pod="openshift-apiserver/apiserver-76f77b778f-vm5md" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.137460 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: 
\"kubernetes.io/secret/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-g4khj\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.137479 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4bh4g\" (UniqueName: \"kubernetes.io/projected/713cc598-c331-4416-9826-0418c542a29b-kube-api-access-4bh4g\") pod \"ingress-operator-5b745b69d9-6p46s\" (UID: \"713cc598-c331-4416-9826-0418c542a29b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-6p46s" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.137526 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/b8314a72-2893-476e-849e-33cd71b1ebd5-available-featuregates\") pod \"openshift-config-operator-7777fb866f-tt99q\" (UID: \"b8314a72-2893-476e-849e-33cd71b1ebd5\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-tt99q" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.135885 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nnrft"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.138559 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-l7qdl"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.138727 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-g4khj\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.138890 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/a398cced-c30a-4638-96c2-c7fa84672dab-image-import-ca\") pod \"apiserver-76f77b778f-vm5md\" (UID: \"a398cced-c30a-4638-96c2-c7fa84672dab\") " pod="openshift-apiserver/apiserver-76f77b778f-vm5md" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.139105 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-56tdr"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.139144 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/df28feb8-3f45-493c-a794-bb64821b0fb0-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-7r2kv\" (UID: \"df28feb8-3f45-493c-a794-bb64821b0fb0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7r2kv" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.139518 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f19591a9-a16b-4d6d-a59d-4d2f825ad7c7-config\") pod \"openshift-apiserver-operator-796bbdcf4f-n4llx\" (UID: \"f19591a9-a16b-4d6d-a59d-4d2f825ad7c7\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-n4llx" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.139712 4949 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-rrm98"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.140508 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/d2690ae1-0168-43e5-aa99-3e926f6979d8-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-drxpj\" (UID: \"d2690ae1-0168-43e5-aa99-3e926f6979d8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-drxpj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.140750 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-rrm98" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.141795 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/a398cced-c30a-4638-96c2-c7fa84672dab-etcd-serving-ca\") pod \"apiserver-76f77b778f-vm5md\" (UID: \"a398cced-c30a-4638-96c2-c7fa84672dab\") " pod="openshift-apiserver/apiserver-76f77b778f-vm5md" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.141857 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a398cced-c30a-4638-96c2-c7fa84672dab-audit-dir\") pod \"apiserver-76f77b778f-vm5md\" (UID: \"a398cced-c30a-4638-96c2-c7fa84672dab\") " pod="openshift-apiserver/apiserver-76f77b778f-vm5md" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.141896 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/df28feb8-3f45-493c-a794-bb64821b0fb0-audit-dir\") pod \"apiserver-7bbb656c7d-7r2kv\" (UID: \"df28feb8-3f45-493c-a794-bb64821b0fb0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7r2kv" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.142783 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d2690ae1-0168-43e5-aa99-3e926f6979d8-client-ca\") pod \"controller-manager-879f6c89f-drxpj\" (UID: \"d2690ae1-0168-43e5-aa99-3e926f6979d8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-drxpj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.143997 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/73ad7549-c192-4d13-b957-940364f2911b-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-kq7f7\" (UID: \"73ad7549-c192-4d13-b957-940364f2911b\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kq7f7" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.145638 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/5e66d330-dc75-4a98-9739-589d8df61a68-audit-policies\") pod \"oauth-openshift-558db77b4-g4khj\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.147019 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-zdcp8"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.147670 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29520660-45x59"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.148063 4949 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29520660-45x59" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.148488 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a398cced-c30a-4638-96c2-c7fa84672dab-serving-cert\") pod \"apiserver-76f77b778f-vm5md\" (UID: \"a398cced-c30a-4638-96c2-c7fa84672dab\") " pod="openshift-apiserver/apiserver-76f77b778f-vm5md" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.148944 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/a398cced-c30a-4638-96c2-c7fa84672dab-audit\") pod \"apiserver-76f77b778f-vm5md\" (UID: \"a398cced-c30a-4638-96c2-c7fa84672dab\") " pod="openshift-apiserver/apiserver-76f77b778f-vm5md" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.149456 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-g4khj\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.149920 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/df28feb8-3f45-493c-a794-bb64821b0fb0-encryption-config\") pod \"apiserver-7bbb656c7d-7r2kv\" (UID: \"df28feb8-3f45-493c-a794-bb64821b0fb0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7r2kv" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.149968 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-g4khj\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.151661 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/5e66d330-dc75-4a98-9739-589d8df61a68-audit-dir\") pod \"oauth-openshift-558db77b4-g4khj\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.151911 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-g4khj\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.152322 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-zdcp8" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.152800 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nnrft" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.153385 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c86306bc-5026-49a4-adee-3fd485587e75-trusted-ca\") pod \"console-operator-58897d9998-2j2dm\" (UID: \"c86306bc-5026-49a4-adee-3fd485587e75\") " pod="openshift-console-operator/console-operator-58897d9998-2j2dm" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.153575 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-l7qdl" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.153770 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-56tdr" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.154500 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1114973a-d32b-45b3-bce3-dcf894f30d43-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-fkf4v\" (UID: \"1114973a-d32b-45b3-bce3-dcf894f30d43\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fkf4v" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.154874 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8cgj5"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.156117 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/df28feb8-3f45-493c-a794-bb64821b0fb0-audit-policies\") pod \"apiserver-7bbb656c7d-7r2kv\" (UID: \"df28feb8-3f45-493c-a794-bb64821b0fb0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7r2kv" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.156285 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a398cced-c30a-4638-96c2-c7fa84672dab-trusted-ca-bundle\") pod \"apiserver-76f77b778f-vm5md\" (UID: \"a398cced-c30a-4638-96c2-c7fa84672dab\") " pod="openshift-apiserver/apiserver-76f77b778f-vm5md" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.156526 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.156563 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/df28feb8-3f45-493c-a794-bb64821b0fb0-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-7r2kv\" (UID: \"df28feb8-3f45-493c-a794-bb64821b0fb0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7r2kv" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.159784 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-g4khj\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.160408 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"serving-cert\" (UniqueName: \"kubernetes.io/secret/f19591a9-a16b-4d6d-a59d-4d2f825ad7c7-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-n4llx\" (UID: \"f19591a9-a16b-4d6d-a59d-4d2f825ad7c7\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-n4llx" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.160486 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/a398cced-c30a-4638-96c2-c7fa84672dab-node-pullsecrets\") pod \"apiserver-76f77b778f-vm5md\" (UID: \"a398cced-c30a-4638-96c2-c7fa84672dab\") " pod="openshift-apiserver/apiserver-76f77b778f-vm5md" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.161419 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c86306bc-5026-49a4-adee-3fd485587e75-config\") pod \"console-operator-58897d9998-2j2dm\" (UID: \"c86306bc-5026-49a4-adee-3fd485587e75\") " pod="openshift-console-operator/console-operator-58897d9998-2j2dm" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.162632 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a398cced-c30a-4638-96c2-c7fa84672dab-config\") pod \"apiserver-76f77b778f-vm5md\" (UID: \"a398cced-c30a-4638-96c2-c7fa84672dab\") " pod="openshift-apiserver/apiserver-76f77b778f-vm5md" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.163547 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d2690ae1-0168-43e5-aa99-3e926f6979d8-config\") pod \"controller-manager-879f6c89f-drxpj\" (UID: \"d2690ae1-0168-43e5-aa99-3e926f6979d8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-drxpj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.163675 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/df28feb8-3f45-493c-a794-bb64821b0fb0-etcd-client\") pod \"apiserver-7bbb656c7d-7r2kv\" (UID: \"df28feb8-3f45-493c-a794-bb64821b0fb0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7r2kv" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.171914 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.172330 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d2690ae1-0168-43e5-aa99-3e926f6979d8-serving-cert\") pod \"controller-manager-879f6c89f-drxpj\" (UID: \"d2690ae1-0168-43e5-aa99-3e926f6979d8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-drxpj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.172771 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-g4khj\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.173423 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/a398cced-c30a-4638-96c2-c7fa84672dab-encryption-config\") pod \"apiserver-76f77b778f-vm5md\" (UID: 
\"a398cced-c30a-4638-96c2-c7fa84672dab\") " pod="openshift-apiserver/apiserver-76f77b778f-vm5md" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.173481 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-g4khj\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.173909 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/1114973a-d32b-45b3-bce3-dcf894f30d43-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-fkf4v\" (UID: \"1114973a-d32b-45b3-bce3-dcf894f30d43\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fkf4v" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.175477 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/ce076bd0-01e6-4ebf-a060-385ae309169a-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-tmlk2\" (UID: \"ce076bd0-01e6-4ebf-a060-385ae309169a\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-tmlk2" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.177650 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-vwhm9"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.178074 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-khb7r"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.178384 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-9ddvv"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.181119 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-g4khj\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.184034 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/a398cced-c30a-4638-96c2-c7fa84672dab-etcd-client\") pod \"apiserver-76f77b778f-vm5md\" (UID: \"a398cced-c30a-4638-96c2-c7fa84672dab\") " pod="openshift-apiserver/apiserver-76f77b778f-vm5md" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.184104 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-g4khj\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.184419 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-vwhm9" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.184553 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/73ad7549-c192-4d13-b957-940364f2911b-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-kq7f7\" (UID: \"73ad7549-c192-4d13-b957-940364f2911b\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kq7f7" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.184624 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-clst2"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.185135 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-9ddvv" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.185364 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c86306bc-5026-49a4-adee-3fd485587e75-serving-cert\") pod \"console-operator-58897d9998-2j2dm\" (UID: \"c86306bc-5026-49a4-adee-3fd485587e75\") " pod="openshift-console-operator/console-operator-58897d9998-2j2dm" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.185727 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-khb7r" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.186111 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8cgj5" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.187591 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.188583 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/df28feb8-3f45-493c-a794-bb64821b0fb0-serving-cert\") pod \"apiserver-7bbb656c7d-7r2kv\" (UID: \"df28feb8-3f45-493c-a794-bb64821b0fb0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7r2kv" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.188642 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-g4khj\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.196936 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fkf4v"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.196993 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-drxpj"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.197034 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-tmlk2"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.197095 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cngx5"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.197112 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-22nsc"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.197297 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-clst2" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.199525 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-n4llx"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.199668 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-mwmrl"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.200080 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-22nsc" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.206281 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-vm5md"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.206351 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-kwq47"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.206366 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-2j2dm"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.206378 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-p2jx5"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.209011 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.210272 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-g4khj\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.213722 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-l9xzk"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.213806 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-7r2kv"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.215140 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-g4khj"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.216376 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-tt99q"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.217701 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-cfspd"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.219607 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-l7qdl"] Feb 16 11:09:10 crc 
kubenswrapper[4949]: I0216 11:09:10.222059 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-rrm98"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.226401 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.226414 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-5nxhr"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.227679 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-j5268"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.229193 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-vwhm9"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.231927 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ld5mv"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.234820 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-56tdr"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.237473 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-5pbl4"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.238402 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6360e7de-ae30-4cfc-8450-a0eaff573c5c-images\") pod \"machine-config-operator-74547568cd-82fv4\" (UID: \"6360e7de-ae30-4cfc-8450-a0eaff573c5c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-82fv4" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.238427 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c93693d-d1ae-41d1-9ed6-744dc03aa0b1-config\") pod \"authentication-operator-69f744f599-5nxhr\" (UID: \"4c93693d-d1ae-41d1-9ed6-744dc03aa0b1\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5nxhr" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.238448 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5v7xj\" (UniqueName: \"kubernetes.io/projected/4c93693d-d1ae-41d1-9ed6-744dc03aa0b1-kube-api-access-5v7xj\") pod \"authentication-operator-69f744f599-5nxhr\" (UID: \"4c93693d-d1ae-41d1-9ed6-744dc03aa0b1\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5nxhr" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.238469 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/713cc598-c331-4416-9826-0418c542a29b-trusted-ca\") pod \"ingress-operator-5b745b69d9-6p46s\" (UID: \"713cc598-c331-4416-9826-0418c542a29b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-6p46s" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.238488 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b8314a72-2893-476e-849e-33cd71b1ebd5-serving-cert\") pod \"openshift-config-operator-7777fb866f-tt99q\" (UID: 
\"b8314a72-2893-476e-849e-33cd71b1ebd5\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-tt99q" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.238507 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9svvw\" (UniqueName: \"kubernetes.io/projected/b292c120-8566-4d1a-b522-1739f12db3ab-kube-api-access-9svvw\") pod \"etcd-operator-b45778765-mwmrl\" (UID: \"b292c120-8566-4d1a-b522-1739f12db3ab\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mwmrl" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.238523 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s4f7n\" (UniqueName: \"kubernetes.io/projected/b8314a72-2893-476e-849e-33cd71b1ebd5-kube-api-access-s4f7n\") pod \"openshift-config-operator-7777fb866f-tt99q\" (UID: \"b8314a72-2893-476e-849e-33cd71b1ebd5\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-tt99q" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.238540 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/6360e7de-ae30-4cfc-8450-a0eaff573c5c-proxy-tls\") pod \"machine-config-operator-74547568cd-82fv4\" (UID: \"6360e7de-ae30-4cfc-8450-a0eaff573c5c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-82fv4" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.238558 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/048dcd85-c085-40e8-b952-d97abe29ac36-default-certificate\") pod \"router-default-5444994796-t47hp\" (UID: \"048dcd85-c085-40e8-b952-d97abe29ac36\") " pod="openshift-ingress/router-default-5444994796-t47hp" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.238583 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/713cc598-c331-4416-9826-0418c542a29b-metrics-tls\") pod \"ingress-operator-5b745b69d9-6p46s\" (UID: \"713cc598-c331-4416-9826-0418c542a29b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-6p46s" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.238599 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/468461de-4a56-47b0-a5a9-cf6e51b6de47-console-config\") pod \"console-f9d7485db-5xfq9\" (UID: \"468461de-4a56-47b0-a5a9-cf6e51b6de47\") " pod="openshift-console/console-f9d7485db-5xfq9" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.238615 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/048dcd85-c085-40e8-b952-d97abe29ac36-metrics-certs\") pod \"router-default-5444994796-t47hp\" (UID: \"048dcd85-c085-40e8-b952-d97abe29ac36\") " pod="openshift-ingress/router-default-5444994796-t47hp" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.238652 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/713cc598-c331-4416-9826-0418c542a29b-bound-sa-token\") pod \"ingress-operator-5b745b69d9-6p46s\" (UID: \"713cc598-c331-4416-9826-0418c542a29b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-6p46s" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.238670 4949 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4c93693d-d1ae-41d1-9ed6-744dc03aa0b1-service-ca-bundle\") pod \"authentication-operator-69f744f599-5nxhr\" (UID: \"4c93693d-d1ae-41d1-9ed6-744dc03aa0b1\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5nxhr" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.238686 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5fqw5\" (UniqueName: \"kubernetes.io/projected/30431645-dce8-4da3-a237-1ec14d3b2c73-kube-api-access-5fqw5\") pod \"dns-operator-744455d44c-rk65d\" (UID: \"30431645-dce8-4da3-a237-1ec14d3b2c73\") " pod="openshift-dns-operator/dns-operator-744455d44c-rk65d" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.238704 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b292c120-8566-4d1a-b522-1739f12db3ab-serving-cert\") pod \"etcd-operator-b45778765-mwmrl\" (UID: \"b292c120-8566-4d1a-b522-1739f12db3ab\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mwmrl" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.238729 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b60d988a-5b8e-4fdd-94f4-b61faaf6b5a5-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-ld5mv\" (UID: \"b60d988a-5b8e-4fdd-94f4-b61faaf6b5a5\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ld5mv" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.238747 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/afc70085-b399-4f43-a311-0e38471ad055-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-j5268\" (UID: \"afc70085-b399-4f43-a311-0e38471ad055\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-j5268" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.238763 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/5a74756d-ce49-4ba1-9dcb-fedee6464166-machine-approver-tls\") pod \"machine-approver-56656f9798-488fx\" (UID: \"5a74756d-ce49-4ba1-9dcb-fedee6464166\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-488fx" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.238781 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/468461de-4a56-47b0-a5a9-cf6e51b6de47-oauth-serving-cert\") pod \"console-f9d7485db-5xfq9\" (UID: \"468461de-4a56-47b0-a5a9-cf6e51b6de47\") " pod="openshift-console/console-f9d7485db-5xfq9" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.238801 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b60d988a-5b8e-4fdd-94f4-b61faaf6b5a5-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-ld5mv\" (UID: \"b60d988a-5b8e-4fdd-94f4-b61faaf6b5a5\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ld5mv" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.238825 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4bh4g\" (UniqueName: 
\"kubernetes.io/projected/713cc598-c331-4416-9826-0418c542a29b-kube-api-access-4bh4g\") pod \"ingress-operator-5b745b69d9-6p46s\" (UID: \"713cc598-c331-4416-9826-0418c542a29b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-6p46s" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.238841 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/b8314a72-2893-476e-849e-33cd71b1ebd5-available-featuregates\") pod \"openshift-config-operator-7777fb866f-tt99q\" (UID: \"b8314a72-2893-476e-849e-33cd71b1ebd5\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-tt99q" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.238858 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-txm47\" (UniqueName: \"kubernetes.io/projected/6360e7de-ae30-4cfc-8450-a0eaff573c5c-kube-api-access-txm47\") pod \"machine-config-operator-74547568cd-82fv4\" (UID: \"6360e7de-ae30-4cfc-8450-a0eaff573c5c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-82fv4" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.238876 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/b292c120-8566-4d1a-b522-1739f12db3ab-etcd-service-ca\") pod \"etcd-operator-b45778765-mwmrl\" (UID: \"b292c120-8566-4d1a-b522-1739f12db3ab\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mwmrl" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.238894 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/048dcd85-c085-40e8-b952-d97abe29ac36-service-ca-bundle\") pod \"router-default-5444994796-t47hp\" (UID: \"048dcd85-c085-40e8-b952-d97abe29ac36\") " pod="openshift-ingress/router-default-5444994796-t47hp" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.238933 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t7pd6\" (UniqueName: \"kubernetes.io/projected/0492d421-806b-48e4-8a97-3032888e370e-kube-api-access-t7pd6\") pod \"route-controller-manager-6576b87f9c-p2jx5\" (UID: \"0492d421-806b-48e4-8a97-3032888e370e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p2jx5" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.238952 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4c93693d-d1ae-41d1-9ed6-744dc03aa0b1-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-5nxhr\" (UID: \"4c93693d-d1ae-41d1-9ed6-744dc03aa0b1\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5nxhr" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.238969 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/30431645-dce8-4da3-a237-1ec14d3b2c73-metrics-tls\") pod \"dns-operator-744455d44c-rk65d\" (UID: \"30431645-dce8-4da3-a237-1ec14d3b2c73\") " pod="openshift-dns-operator/dns-operator-744455d44c-rk65d" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.238984 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/5a74756d-ce49-4ba1-9dcb-fedee6464166-auth-proxy-config\") pod 
\"machine-approver-56656f9798-488fx\" (UID: \"5a74756d-ce49-4ba1-9dcb-fedee6464166\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-488fx" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.239007 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/468461de-4a56-47b0-a5a9-cf6e51b6de47-console-serving-cert\") pod \"console-f9d7485db-5xfq9\" (UID: \"468461de-4a56-47b0-a5a9-cf6e51b6de47\") " pod="openshift-console/console-f9d7485db-5xfq9" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.239023 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b292c120-8566-4d1a-b522-1739f12db3ab-config\") pod \"etcd-operator-b45778765-mwmrl\" (UID: \"b292c120-8566-4d1a-b522-1739f12db3ab\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mwmrl" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.239040 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/b292c120-8566-4d1a-b522-1739f12db3ab-etcd-ca\") pod \"etcd-operator-b45778765-mwmrl\" (UID: \"b292c120-8566-4d1a-b522-1739f12db3ab\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mwmrl" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.239076 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/6360e7de-ae30-4cfc-8450-a0eaff573c5c-auth-proxy-config\") pod \"machine-config-operator-74547568cd-82fv4\" (UID: \"6360e7de-ae30-4cfc-8450-a0eaff573c5c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-82fv4" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.239093 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0492d421-806b-48e4-8a97-3032888e370e-serving-cert\") pod \"route-controller-manager-6576b87f9c-p2jx5\" (UID: \"0492d421-806b-48e4-8a97-3032888e370e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p2jx5" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.239113 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-82fv4"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.239125 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4c93693d-d1ae-41d1-9ed6-744dc03aa0b1-serving-cert\") pod \"authentication-operator-69f744f599-5nxhr\" (UID: \"4c93693d-d1ae-41d1-9ed6-744dc03aa0b1\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5nxhr" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.239144 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/afc70085-b399-4f43-a311-0e38471ad055-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-j5268\" (UID: \"afc70085-b399-4f43-a311-0e38471ad055\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-j5268" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.239163 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4d4l8\" (UniqueName: 
\"kubernetes.io/projected/048dcd85-c085-40e8-b952-d97abe29ac36-kube-api-access-4d4l8\") pod \"router-default-5444994796-t47hp\" (UID: \"048dcd85-c085-40e8-b952-d97abe29ac36\") " pod="openshift-ingress/router-default-5444994796-t47hp" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.239193 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2h9bv\" (UniqueName: \"kubernetes.io/projected/5a74756d-ce49-4ba1-9dcb-fedee6464166-kube-api-access-2h9bv\") pod \"machine-approver-56656f9798-488fx\" (UID: \"5a74756d-ce49-4ba1-9dcb-fedee6464166\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-488fx" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.239210 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/b292c120-8566-4d1a-b522-1739f12db3ab-etcd-client\") pod \"etcd-operator-b45778765-mwmrl\" (UID: \"b292c120-8566-4d1a-b522-1739f12db3ab\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mwmrl" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.239289 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-5pbl4" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.239327 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/048dcd85-c085-40e8-b952-d97abe29ac36-stats-auth\") pod \"router-default-5444994796-t47hp\" (UID: \"048dcd85-c085-40e8-b952-d97abe29ac36\") " pod="openshift-ingress/router-default-5444994796-t47hp" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.239346 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/afc70085-b399-4f43-a311-0e38471ad055-config\") pod \"kube-controller-manager-operator-78b949d7b-j5268\" (UID: \"afc70085-b399-4f43-a311-0e38471ad055\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-j5268" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.239369 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0492d421-806b-48e4-8a97-3032888e370e-config\") pod \"route-controller-manager-6576b87f9c-p2jx5\" (UID: \"0492d421-806b-48e4-8a97-3032888e370e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p2jx5" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.239385 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/468461de-4a56-47b0-a5a9-cf6e51b6de47-console-oauth-config\") pod \"console-f9d7485db-5xfq9\" (UID: \"468461de-4a56-47b0-a5a9-cf6e51b6de47\") " pod="openshift-console/console-f9d7485db-5xfq9" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.239413 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/468461de-4a56-47b0-a5a9-cf6e51b6de47-service-ca\") pod \"console-f9d7485db-5xfq9\" (UID: \"468461de-4a56-47b0-a5a9-cf6e51b6de47\") " pod="openshift-console/console-f9d7485db-5xfq9" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.239429 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-85hth\" (UniqueName: \"kubernetes.io/projected/468461de-4a56-47b0-a5a9-cf6e51b6de47-kube-api-access-85hth\") 
pod \"console-f9d7485db-5xfq9\" (UID: \"468461de-4a56-47b0-a5a9-cf6e51b6de47\") " pod="openshift-console/console-f9d7485db-5xfq9" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.239461 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/468461de-4a56-47b0-a5a9-cf6e51b6de47-trusted-ca-bundle\") pod \"console-f9d7485db-5xfq9\" (UID: \"468461de-4a56-47b0-a5a9-cf6e51b6de47\") " pod="openshift-console/console-f9d7485db-5xfq9" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.239478 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5a74756d-ce49-4ba1-9dcb-fedee6464166-config\") pod \"machine-approver-56656f9798-488fx\" (UID: \"5a74756d-ce49-4ba1-9dcb-fedee6464166\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-488fx" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.239493 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b60d988a-5b8e-4fdd-94f4-b61faaf6b5a5-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-ld5mv\" (UID: \"b60d988a-5b8e-4fdd-94f4-b61faaf6b5a5\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ld5mv" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.239510 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0492d421-806b-48e4-8a97-3032888e370e-client-ca\") pod \"route-controller-manager-6576b87f9c-p2jx5\" (UID: \"0492d421-806b-48e4-8a97-3032888e370e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p2jx5" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.240202 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nnrft"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.240436 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0492d421-806b-48e4-8a97-3032888e370e-client-ca\") pod \"route-controller-manager-6576b87f9c-p2jx5\" (UID: \"0492d421-806b-48e4-8a97-3032888e370e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p2jx5" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.240716 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c93693d-d1ae-41d1-9ed6-744dc03aa0b1-config\") pod \"authentication-operator-69f744f599-5nxhr\" (UID: \"4c93693d-d1ae-41d1-9ed6-744dc03aa0b1\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5nxhr" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.241532 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/afc70085-b399-4f43-a311-0e38471ad055-config\") pod \"kube-controller-manager-operator-78b949d7b-j5268\" (UID: \"afc70085-b399-4f43-a311-0e38471ad055\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-j5268" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.242598 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/b8314a72-2893-476e-849e-33cd71b1ebd5-available-featuregates\") pod 
\"openshift-config-operator-7777fb866f-tt99q\" (UID: \"b8314a72-2893-476e-849e-33cd71b1ebd5\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-tt99q" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.242716 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4c93693d-d1ae-41d1-9ed6-744dc03aa0b1-service-ca-bundle\") pod \"authentication-operator-69f744f599-5nxhr\" (UID: \"4c93693d-d1ae-41d1-9ed6-744dc03aa0b1\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5nxhr" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.243566 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0492d421-806b-48e4-8a97-3032888e370e-config\") pod \"route-controller-manager-6576b87f9c-p2jx5\" (UID: \"0492d421-806b-48e4-8a97-3032888e370e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p2jx5" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.244456 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/5a74756d-ce49-4ba1-9dcb-fedee6464166-auth-proxy-config\") pod \"machine-approver-56656f9798-488fx\" (UID: \"5a74756d-ce49-4ba1-9dcb-fedee6464166\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-488fx" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.244555 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5a74756d-ce49-4ba1-9dcb-fedee6464166-config\") pod \"machine-approver-56656f9798-488fx\" (UID: \"5a74756d-ce49-4ba1-9dcb-fedee6464166\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-488fx" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.244581 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-khb7r"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.244976 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4c93693d-d1ae-41d1-9ed6-744dc03aa0b1-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-5nxhr\" (UID: \"4c93693d-d1ae-41d1-9ed6-744dc03aa0b1\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5nxhr" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.245277 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/6360e7de-ae30-4cfc-8450-a0eaff573c5c-auth-proxy-config\") pod \"machine-config-operator-74547568cd-82fv4\" (UID: \"6360e7de-ae30-4cfc-8450-a0eaff573c5c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-82fv4" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.245429 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/5a74756d-ce49-4ba1-9dcb-fedee6464166-machine-approver-tls\") pod \"machine-approver-56656f9798-488fx\" (UID: \"5a74756d-ce49-4ba1-9dcb-fedee6464166\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-488fx" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.246217 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: 
\"kubernetes.io/secret/30431645-dce8-4da3-a237-1ec14d3b2c73-metrics-tls\") pod \"dns-operator-744455d44c-rk65d\" (UID: \"30431645-dce8-4da3-a237-1ec14d3b2c73\") " pod="openshift-dns-operator/dns-operator-744455d44c-rk65d" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.246359 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b8314a72-2893-476e-849e-33cd71b1ebd5-serving-cert\") pod \"openshift-config-operator-7777fb866f-tt99q\" (UID: \"b8314a72-2893-476e-849e-33cd71b1ebd5\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-tt99q" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.246130 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-zsl98"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.247669 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-zdcp8"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.247796 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-zsl98" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.247894 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0492d421-806b-48e4-8a97-3032888e370e-serving-cert\") pod \"route-controller-manager-6576b87f9c-p2jx5\" (UID: \"0492d421-806b-48e4-8a97-3032888e370e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p2jx5" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.248734 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.249788 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kq7f7"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.250107 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4c93693d-d1ae-41d1-9ed6-744dc03aa0b1-serving-cert\") pod \"authentication-operator-69f744f599-5nxhr\" (UID: \"4c93693d-d1ae-41d1-9ed6-744dc03aa0b1\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5nxhr" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.251158 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-kzx7x"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.252117 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/afc70085-b399-4f43-a311-0e38471ad055-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-j5268\" (UID: \"afc70085-b399-4f43-a311-0e38471ad055\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-j5268" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.252444 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-9ddvv"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.253652 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-5xpfr"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.255558 4949 kubelet.go:2428] "SyncLoop 
UPDATE" source="api" pods=["openshift-console/console-f9d7485db-5xfq9"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.255797 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-rk65d"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.257101 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-6p46s"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.258266 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29520660-45x59"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.260832 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-22nsc"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.261776 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8cgj5"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.262916 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-clst2"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.263912 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-5pbl4"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.265007 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-zsl98"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.266028 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-s8znm"] Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.266634 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.266848 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-s8znm" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.286526 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.306014 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.326790 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.346277 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.356013 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/b292c120-8566-4d1a-b522-1739f12db3ab-etcd-client\") pod \"etcd-operator-b45778765-mwmrl\" (UID: \"b292c120-8566-4d1a-b522-1739f12db3ab\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mwmrl" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.368517 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.387772 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.395885 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b60d988a-5b8e-4fdd-94f4-b61faaf6b5a5-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-ld5mv\" (UID: \"b60d988a-5b8e-4fdd-94f4-b61faaf6b5a5\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ld5mv" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.406660 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.413032 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b60d988a-5b8e-4fdd-94f4-b61faaf6b5a5-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-ld5mv\" (UID: \"b60d988a-5b8e-4fdd-94f4-b61faaf6b5a5\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ld5mv" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.427918 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.436310 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b292c120-8566-4d1a-b522-1739f12db3ab-serving-cert\") pod \"etcd-operator-b45778765-mwmrl\" (UID: \"b292c120-8566-4d1a-b522-1739f12db3ab\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mwmrl" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.446435 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.451419 4949 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/b292c120-8566-4d1a-b522-1739f12db3ab-etcd-service-ca\") pod \"etcd-operator-b45778765-mwmrl\" (UID: \"b292c120-8566-4d1a-b522-1739f12db3ab\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mwmrl" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.467268 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.487067 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.490692 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6360e7de-ae30-4cfc-8450-a0eaff573c5c-images\") pod \"machine-config-operator-74547568cd-82fv4\" (UID: \"6360e7de-ae30-4cfc-8450-a0eaff573c5c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-82fv4" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.506529 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.513616 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b292c120-8566-4d1a-b522-1739f12db3ab-config\") pod \"etcd-operator-b45778765-mwmrl\" (UID: \"b292c120-8566-4d1a-b522-1739f12db3ab\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mwmrl" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.526438 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.532039 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/b292c120-8566-4d1a-b522-1739f12db3ab-etcd-ca\") pod \"etcd-operator-b45778765-mwmrl\" (UID: \"b292c120-8566-4d1a-b522-1739f12db3ab\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mwmrl" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.546503 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.566083 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.586330 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.594492 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/6360e7de-ae30-4cfc-8450-a0eaff573c5c-proxy-tls\") pod \"machine-config-operator-74547568cd-82fv4\" (UID: \"6360e7de-ae30-4cfc-8450-a0eaff573c5c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-82fv4" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.625880 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.646746 4949 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-ingress-operator"/"metrics-tls" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.656067 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/713cc598-c331-4416-9826-0418c542a29b-metrics-tls\") pod \"ingress-operator-5b745b69d9-6p46s\" (UID: \"713cc598-c331-4416-9826-0418c542a29b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-6p46s" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.665533 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.693205 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.702302 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/713cc598-c331-4416-9826-0418c542a29b-trusted-ca\") pod \"ingress-operator-5b745b69d9-6p46s\" (UID: \"713cc598-c331-4416-9826-0418c542a29b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-6p46s" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.706669 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.727840 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.733979 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/468461de-4a56-47b0-a5a9-cf6e51b6de47-oauth-serving-cert\") pod \"console-f9d7485db-5xfq9\" (UID: \"468461de-4a56-47b0-a5a9-cf6e51b6de47\") " pod="openshift-console/console-f9d7485db-5xfq9" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.746510 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.767432 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.778126 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/468461de-4a56-47b0-a5a9-cf6e51b6de47-console-serving-cert\") pod \"console-f9d7485db-5xfq9\" (UID: \"468461de-4a56-47b0-a5a9-cf6e51b6de47\") " pod="openshift-console/console-f9d7485db-5xfq9" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.790623 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.796074 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/468461de-4a56-47b0-a5a9-cf6e51b6de47-console-oauth-config\") pod \"console-f9d7485db-5xfq9\" (UID: \"468461de-4a56-47b0-a5a9-cf6e51b6de47\") " pod="openshift-console/console-f9d7485db-5xfq9" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.807196 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.812944 4949 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/468461de-4a56-47b0-a5a9-cf6e51b6de47-console-config\") pod \"console-f9d7485db-5xfq9\" (UID: \"468461de-4a56-47b0-a5a9-cf6e51b6de47\") " pod="openshift-console/console-f9d7485db-5xfq9" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.826816 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.833765 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/468461de-4a56-47b0-a5a9-cf6e51b6de47-service-ca\") pod \"console-f9d7485db-5xfq9\" (UID: \"468461de-4a56-47b0-a5a9-cf6e51b6de47\") " pod="openshift-console/console-f9d7485db-5xfq9" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.855381 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.864558 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/468461de-4a56-47b0-a5a9-cf6e51b6de47-trusted-ca-bundle\") pod \"console-f9d7485db-5xfq9\" (UID: \"468461de-4a56-47b0-a5a9-cf6e51b6de47\") " pod="openshift-console/console-f9d7485db-5xfq9" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.866463 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.874695 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/048dcd85-c085-40e8-b952-d97abe29ac36-metrics-certs\") pod \"router-default-5444994796-t47hp\" (UID: \"048dcd85-c085-40e8-b952-d97abe29ac36\") " pod="openshift-ingress/router-default-5444994796-t47hp" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.886767 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.906305 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.911984 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/048dcd85-c085-40e8-b952-d97abe29ac36-service-ca-bundle\") pod \"router-default-5444994796-t47hp\" (UID: \"048dcd85-c085-40e8-b952-d97abe29ac36\") " pod="openshift-ingress/router-default-5444994796-t47hp" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.926705 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.946859 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.966328 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.976134 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/048dcd85-c085-40e8-b952-d97abe29ac36-default-certificate\") pod \"router-default-5444994796-t47hp\" (UID: \"048dcd85-c085-40e8-b952-d97abe29ac36\") " 
pod="openshift-ingress/router-default-5444994796-t47hp" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.987671 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Feb 16 11:09:10 crc kubenswrapper[4949]: I0216 11:09:10.997205 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/048dcd85-c085-40e8-b952-d97abe29ac36-stats-auth\") pod \"router-default-5444994796-t47hp\" (UID: \"048dcd85-c085-40e8-b952-d97abe29ac36\") " pod="openshift-ingress/router-default-5444994796-t47hp" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.026795 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.047778 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.066791 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.119818 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.126004 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.133370 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8rqdh\" (UniqueName: \"kubernetes.io/projected/ce076bd0-01e6-4ebf-a060-385ae309169a-kube-api-access-8rqdh\") pod \"cluster-samples-operator-665b6dd947-tmlk2\" (UID: \"ce076bd0-01e6-4ebf-a060-385ae309169a\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-tmlk2" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.145204 4949 request.go:700] Waited for 1.00179981s due to client-side throttling, not priority and fairness, request: POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console-operator/serviceaccounts/console-operator/token Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.166302 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.168053 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tbv2f\" (UniqueName: \"kubernetes.io/projected/c86306bc-5026-49a4-adee-3fd485587e75-kube-api-access-tbv2f\") pod \"console-operator-58897d9998-2j2dm\" (UID: \"c86306bc-5026-49a4-adee-3fd485587e75\") " pod="openshift-console-operator/console-operator-58897d9998-2j2dm" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.201765 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6lfbm\" (UniqueName: \"kubernetes.io/projected/d2690ae1-0168-43e5-aa99-3e926f6979d8-kube-api-access-6lfbm\") pod \"controller-manager-879f6c89f-drxpj\" (UID: \"d2690ae1-0168-43e5-aa99-3e926f6979d8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-drxpj" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.227225 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"kube-api-access-qp7d4\" (UniqueName: \"kubernetes.io/projected/df28feb8-3f45-493c-a794-bb64821b0fb0-kube-api-access-qp7d4\") pod \"apiserver-7bbb656c7d-7r2kv\" (UID: \"df28feb8-3f45-493c-a794-bb64821b0fb0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7r2kv" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.247154 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bxdnr\" (UniqueName: \"kubernetes.io/projected/1114973a-d32b-45b3-bce3-dcf894f30d43-kube-api-access-bxdnr\") pod \"cluster-image-registry-operator-dc59b4c8b-fkf4v\" (UID: \"1114973a-d32b-45b3-bce3-dcf894f30d43\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fkf4v" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.262697 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w25sm\" (UniqueName: \"kubernetes.io/projected/f19591a9-a16b-4d6d-a59d-4d2f825ad7c7-kube-api-access-w25sm\") pod \"openshift-apiserver-operator-796bbdcf4f-n4llx\" (UID: \"f19591a9-a16b-4d6d-a59d-4d2f825ad7c7\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-n4llx" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.284475 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/1114973a-d32b-45b3-bce3-dcf894f30d43-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-fkf4v\" (UID: \"1114973a-d32b-45b3-bce3-dcf894f30d43\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fkf4v" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.291647 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-tmlk2" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.303792 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ntsvz\" (UniqueName: \"kubernetes.io/projected/5e66d330-dc75-4a98-9739-589d8df61a68-kube-api-access-ntsvz\") pod \"oauth-openshift-558db77b4-g4khj\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.306154 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.328438 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.330475 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-n4llx" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.334183 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7r2kv" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.346638 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.349228 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-2j2dm" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.367434 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.390047 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.407599 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.428842 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.451375 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.467033 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.468779 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-drxpj" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.487561 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.506902 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.514817 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-tmlk2"] Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.527868 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.532040 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fkf4v" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.551182 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.551476 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.554547 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-n4llx"] Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.593485 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cf42l\" (UniqueName: \"kubernetes.io/projected/73ad7549-c192-4d13-b957-940364f2911b-kube-api-access-cf42l\") pod \"openshift-controller-manager-operator-756b6f6bc6-kq7f7\" (UID: \"73ad7549-c192-4d13-b957-940364f2911b\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kq7f7" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.603577 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x6665\" (UniqueName: \"kubernetes.io/projected/89406f96-f3ec-4323-bb6a-c42175151f9d-kube-api-access-x6665\") pod \"downloads-7954f5f757-kwq47\" (UID: \"89406f96-f3ec-4323-bb6a-c42175151f9d\") " pod="openshift-console/downloads-7954f5f757-kwq47" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.612426 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-kwq47" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.628098 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.634568 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n7f8p\" (UniqueName: \"kubernetes.io/projected/a398cced-c30a-4638-96c2-c7fa84672dab-kube-api-access-n7f8p\") pod \"apiserver-76f77b778f-vm5md\" (UID: \"a398cced-c30a-4638-96c2-c7fa84672dab\") " pod="openshift-apiserver/apiserver-76f77b778f-vm5md" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.636226 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-2j2dm"] Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.654611 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.673513 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.694921 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Feb 16 11:09:11 crc kubenswrapper[4949]: W0216 11:09:11.703479 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc86306bc_5026_49a4_adee_3fd485587e75.slice/crio-05cd86e35f5c7a8a20a5f617c6c1e50dd2fcf611d98ef6810b4df29b73448276 WatchSource:0}: Error finding container 05cd86e35f5c7a8a20a5f617c6c1e50dd2fcf611d98ef6810b4df29b73448276: Status 404 returned error can't find the container with id 05cd86e35f5c7a8a20a5f617c6c1e50dd2fcf611d98ef6810b4df29b73448276 Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.708601 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Feb 16 
11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.716035 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kq7f7" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.725972 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-vm5md" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.726876 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.744953 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-drxpj"] Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.750195 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.769625 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-7r2kv"] Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.772729 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.788902 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.807316 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.821132 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fkf4v"] Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.826719 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.847315 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Feb 16 11:09:11 crc kubenswrapper[4949]: W0216 11:09:11.852209 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd2690ae1_0168_43e5_aa99_3e926f6979d8.slice/crio-a31fdf74d29b0d02ab83860e1e23f9b26766b71db0545a29afd38b7fb2ff85a7 WatchSource:0}: Error finding container a31fdf74d29b0d02ab83860e1e23f9b26766b71db0545a29afd38b7fb2ff85a7: Status 404 returned error can't find the container with id a31fdf74d29b0d02ab83860e1e23f9b26766b71db0545a29afd38b7fb2ff85a7 Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.867834 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.894641 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.907251 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.914000 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-console/downloads-7954f5f757-kwq47"] Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.926426 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.936646 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-g4khj"] Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.948574 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.972935 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Feb 16 11:09:11 crc kubenswrapper[4949]: W0216 11:09:11.986214 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5e66d330_dc75_4a98_9739_589d8df61a68.slice/crio-ca21aadffd68ddf21e7c12f2d3627c16f2da9ce85a1bce9526d1bcdddde78288 WatchSource:0}: Error finding container ca21aadffd68ddf21e7c12f2d3627c16f2da9ce85a1bce9526d1bcdddde78288: Status 404 returned error can't find the container with id ca21aadffd68ddf21e7c12f2d3627c16f2da9ce85a1bce9526d1bcdddde78288 Feb 16 11:09:11 crc kubenswrapper[4949]: I0216 11:09:11.990362 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Feb 16 11:09:12 crc kubenswrapper[4949]: W0216 11:09:12.007882 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod89406f96_f3ec_4323_bb6a_c42175151f9d.slice/crio-5a7b6cf12764d27ff03de6364004e4789640208ca3ed3f205bb4f3df5a2e9a31 WatchSource:0}: Error finding container 5a7b6cf12764d27ff03de6364004e4789640208ca3ed3f205bb4f3df5a2e9a31: Status 404 returned error can't find the container with id 5a7b6cf12764d27ff03de6364004e4789640208ca3ed3f205bb4f3df5a2e9a31 Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.008641 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.027290 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.047524 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.067289 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.086767 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.106737 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.114808 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-vm5md"] Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.125715 4949 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"openshift-dns"/"dns-default" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.146510 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.164841 4949 request.go:700] Waited for 1.924965314s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-dns/secrets?fieldSelector=metadata.name%3Ddns-default-metrics-tls&limit=500&resourceVersion=0 Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.167375 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.245955 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9svvw\" (UniqueName: \"kubernetes.io/projected/b292c120-8566-4d1a-b522-1739f12db3ab-kube-api-access-9svvw\") pod \"etcd-operator-b45778765-mwmrl\" (UID: \"b292c120-8566-4d1a-b522-1739f12db3ab\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mwmrl" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.246070 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5v7xj\" (UniqueName: \"kubernetes.io/projected/4c93693d-d1ae-41d1-9ed6-744dc03aa0b1-kube-api-access-5v7xj\") pod \"authentication-operator-69f744f599-5nxhr\" (UID: \"4c93693d-d1ae-41d1-9ed6-744dc03aa0b1\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5nxhr" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.247097 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/713cc598-c331-4416-9826-0418c542a29b-bound-sa-token\") pod \"ingress-operator-5b745b69d9-6p46s\" (UID: \"713cc598-c331-4416-9826-0418c542a29b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-6p46s" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.268887 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s4f7n\" (UniqueName: \"kubernetes.io/projected/b8314a72-2893-476e-849e-33cd71b1ebd5-kube-api-access-s4f7n\") pod \"openshift-config-operator-7777fb866f-tt99q\" (UID: \"b8314a72-2893-476e-849e-33cd71b1ebd5\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-tt99q" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.274382 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kq7f7"] Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.284239 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4d4l8\" (UniqueName: \"kubernetes.io/projected/048dcd85-c085-40e8-b952-d97abe29ac36-kube-api-access-4d4l8\") pod \"router-default-5444994796-t47hp\" (UID: \"048dcd85-c085-40e8-b952-d97abe29ac36\") " pod="openshift-ingress/router-default-5444994796-t47hp" Feb 16 11:09:12 crc kubenswrapper[4949]: W0216 11:09:12.296564 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod73ad7549_c192_4d13_b957_940364f2911b.slice/crio-124ca59ccedbdbc28abcb1312f5515102297125f8c3ff26eb45738a314e3fb88 WatchSource:0}: Error finding container 124ca59ccedbdbc28abcb1312f5515102297125f8c3ff26eb45738a314e3fb88: Status 404 returned error can't find the container with id 
124ca59ccedbdbc28abcb1312f5515102297125f8c3ff26eb45738a314e3fb88 Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.304725 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t7pd6\" (UniqueName: \"kubernetes.io/projected/0492d421-806b-48e4-8a97-3032888e370e-kube-api-access-t7pd6\") pod \"route-controller-manager-6576b87f9c-p2jx5\" (UID: \"0492d421-806b-48e4-8a97-3032888e370e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p2jx5" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.323152 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2h9bv\" (UniqueName: \"kubernetes.io/projected/5a74756d-ce49-4ba1-9dcb-fedee6464166-kube-api-access-2h9bv\") pod \"machine-approver-56656f9798-488fx\" (UID: \"5a74756d-ce49-4ba1-9dcb-fedee6464166\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-488fx" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.324048 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" event={"ID":"5e66d330-dc75-4a98-9739-589d8df61a68","Type":"ContainerStarted","Data":"ca21aadffd68ddf21e7c12f2d3627c16f2da9ce85a1bce9526d1bcdddde78288"} Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.326029 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-kwq47" event={"ID":"89406f96-f3ec-4323-bb6a-c42175151f9d","Type":"ContainerStarted","Data":"101a41d156e7d960d6955a37a690504c88c6b3b7aff45f1c234f73e7bdbe2a58"} Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.326102 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-kwq47" event={"ID":"89406f96-f3ec-4323-bb6a-c42175151f9d","Type":"ContainerStarted","Data":"5a7b6cf12764d27ff03de6364004e4789640208ca3ed3f205bb4f3df5a2e9a31"} Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.326208 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-kwq47" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.329237 4949 patch_prober.go:28] interesting pod/downloads-7954f5f757-kwq47 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.329524 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-kwq47" podUID="89406f96-f3ec-4323-bb6a-c42175151f9d" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.332105 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-drxpj" event={"ID":"d2690ae1-0168-43e5-aa99-3e926f6979d8","Type":"ContainerStarted","Data":"45be470dec9d32f09c00568509f058dfef333c5d781b4ba94484dce0c805464e"} Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.332150 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-drxpj" event={"ID":"d2690ae1-0168-43e5-aa99-3e926f6979d8","Type":"ContainerStarted","Data":"a31fdf74d29b0d02ab83860e1e23f9b26766b71db0545a29afd38b7fb2ff85a7"} Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.332385 4949 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-drxpj" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.334754 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-488fx" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.335934 4949 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-drxpj container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.5:8443/healthz\": dial tcp 10.217.0.5:8443: connect: connection refused" start-of-body= Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.336015 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-drxpj" podUID="d2690ae1-0168-43e5-aa99-3e926f6979d8" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.5:8443/healthz\": dial tcp 10.217.0.5:8443: connect: connection refused" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.337164 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-tmlk2" event={"ID":"ce076bd0-01e6-4ebf-a060-385ae309169a","Type":"ContainerStarted","Data":"f74dee74c12366bc367f09f7d340d325bde47e96604ea74dbd21847c2d7b51ed"} Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.337221 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-tmlk2" event={"ID":"ce076bd0-01e6-4ebf-a060-385ae309169a","Type":"ContainerStarted","Data":"ab577c37582e8e7e88f0ce681db3dff3700fc94e2c3e2fb9b71def36730e8a18"} Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.337231 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-tmlk2" event={"ID":"ce076bd0-01e6-4ebf-a060-385ae309169a","Type":"ContainerStarted","Data":"46e2699a54eb592246b302a437a1ad4d2049c0288e6800cbdf62ee9c54e9d18c"} Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.340147 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7r2kv" event={"ID":"df28feb8-3f45-493c-a794-bb64821b0fb0","Type":"ContainerStarted","Data":"c7ff49ab44e1c51385df619c9ad0396fd2bf5fc3a52b54e2bedc2818748bed1f"} Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.342136 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-n4llx" event={"ID":"f19591a9-a16b-4d6d-a59d-4d2f825ad7c7","Type":"ContainerStarted","Data":"95a2e4c9cf70487a5a2a8d73bbe1c40549d1a3a919fc876adbd307f5ae74d628"} Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.342234 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-n4llx" event={"ID":"f19591a9-a16b-4d6d-a59d-4d2f825ad7c7","Type":"ContainerStarted","Data":"c7ebbb58b2f77892fa4a3936046c5be595154d918c91de6cfd3689dc43995b5b"} Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.343931 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-vm5md" event={"ID":"a398cced-c30a-4638-96c2-c7fa84672dab","Type":"ContainerStarted","Data":"2760018126c8b00e3cc11e4dc8ae9b800f8df5852ad97f80639b739bb0c014f2"} Feb 16 11:09:12 crc 
kubenswrapper[4949]: I0216 11:09:12.345763 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fkf4v" event={"ID":"1114973a-d32b-45b3-bce3-dcf894f30d43","Type":"ContainerStarted","Data":"fedb8aef623f80bd73c57649e19f75ea05225c6a9d94781247b99fbc5ef4a45b"} Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.345789 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fkf4v" event={"ID":"1114973a-d32b-45b3-bce3-dcf894f30d43","Type":"ContainerStarted","Data":"e576628c75261546a1db64c7b72760ba6cd91fb9b76ae72dade67c2ad403b30d"} Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.347489 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-5nxhr" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.348917 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kq7f7" event={"ID":"73ad7549-c192-4d13-b957-940364f2911b","Type":"ContainerStarted","Data":"124ca59ccedbdbc28abcb1312f5515102297125f8c3ff26eb45738a314e3fb88"} Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.353906 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p2jx5" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.355661 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b60d988a-5b8e-4fdd-94f4-b61faaf6b5a5-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-ld5mv\" (UID: \"b60d988a-5b8e-4fdd-94f4-b61faaf6b5a5\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ld5mv" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.355678 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-2j2dm" event={"ID":"c86306bc-5026-49a4-adee-3fd485587e75","Type":"ContainerStarted","Data":"ef4fe97d6710efd0f04a6c52019d1f6186db851eb979ada8378601bd2af2402d"} Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.355722 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-2j2dm" event={"ID":"c86306bc-5026-49a4-adee-3fd485587e75","Type":"ContainerStarted","Data":"05cd86e35f5c7a8a20a5f617c6c1e50dd2fcf611d98ef6810b4df29b73448276"} Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.355998 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-2j2dm" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.358109 4949 patch_prober.go:28] interesting pod/console-operator-58897d9998-2j2dm container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.21:8443/readyz\": dial tcp 10.217.0.21:8443: connect: connection refused" start-of-body= Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.358159 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-2j2dm" podUID="c86306bc-5026-49a4-adee-3fd485587e75" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.21:8443/readyz\": dial tcp 10.217.0.21:8443: connect: connection 
refused" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.371542 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4bh4g\" (UniqueName: \"kubernetes.io/projected/713cc598-c331-4416-9826-0418c542a29b-kube-api-access-4bh4g\") pod \"ingress-operator-5b745b69d9-6p46s\" (UID: \"713cc598-c331-4416-9826-0418c542a29b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-6p46s" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.378100 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-tt99q" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.387994 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-txm47\" (UniqueName: \"kubernetes.io/projected/6360e7de-ae30-4cfc-8450-a0eaff573c5c-kube-api-access-txm47\") pod \"machine-config-operator-74547568cd-82fv4\" (UID: \"6360e7de-ae30-4cfc-8450-a0eaff573c5c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-82fv4" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.413388 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ld5mv" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.420548 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-85hth\" (UniqueName: \"kubernetes.io/projected/468461de-4a56-47b0-a5a9-cf6e51b6de47-kube-api-access-85hth\") pod \"console-f9d7485db-5xfq9\" (UID: \"468461de-4a56-47b0-a5a9-cf6e51b6de47\") " pod="openshift-console/console-f9d7485db-5xfq9" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.422992 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-mwmrl" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.427899 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5fqw5\" (UniqueName: \"kubernetes.io/projected/30431645-dce8-4da3-a237-1ec14d3b2c73-kube-api-access-5fqw5\") pod \"dns-operator-744455d44c-rk65d\" (UID: \"30431645-dce8-4da3-a237-1ec14d3b2c73\") " pod="openshift-dns-operator/dns-operator-744455d44c-rk65d" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.439989 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-82fv4" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.441637 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-6p46s" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.447026 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.451770 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-5xfq9" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.456063 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/afc70085-b399-4f43-a311-0e38471ad055-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-j5268\" (UID: \"afc70085-b399-4f43-a311-0e38471ad055\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-j5268" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.458641 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-t47hp" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.467869 4949 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.492448 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.513246 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.547217 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.547670 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.687245 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/31b08225-3ae9-44c9-bcb5-7f34a7b6a969-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-cngx5\" (UID: \"31b08225-3ae9-44c9-bcb5-7f34a7b6a969\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cngx5" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.687300 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/31b08225-3ae9-44c9-bcb5-7f34a7b6a969-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-cngx5\" (UID: \"31b08225-3ae9-44c9-bcb5-7f34a7b6a969\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cngx5" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.687336 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-crcd5\" (UniqueName: \"kubernetes.io/projected/7b6f1d40-6ea7-4fd7-bb6f-c2603bf70807-kube-api-access-crcd5\") pod \"catalog-operator-68c6474976-cfspd\" (UID: \"7b6f1d40-6ea7-4fd7-bb6f-c2603bf70807\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-cfspd" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.687379 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/1d09a8d4-1225-4b5a-94ee-149b7c0fe01e-ca-trust-extracted\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 
11:09:12.687431 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/1d09a8d4-1225-4b5a-94ee-149b7c0fe01e-installation-pull-secrets\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.687461 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/1d09a8d4-1225-4b5a-94ee-149b7c0fe01e-registry-certificates\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.687499 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5xv55\" (UniqueName: \"kubernetes.io/projected/1d09a8d4-1225-4b5a-94ee-149b7c0fe01e-kube-api-access-5xv55\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.687525 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/7b6f1d40-6ea7-4fd7-bb6f-c2603bf70807-srv-cert\") pod \"catalog-operator-68c6474976-cfspd\" (UID: \"7b6f1d40-6ea7-4fd7-bb6f-c2603bf70807\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-cfspd" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.687565 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.687604 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/1d09a8d4-1225-4b5a-94ee-149b7c0fe01e-registry-tls\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.687666 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/31b08225-3ae9-44c9-bcb5-7f34a7b6a969-config\") pod \"kube-apiserver-operator-766d6c64bb-cngx5\" (UID: \"31b08225-3ae9-44c9-bcb5-7f34a7b6a969\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cngx5" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.687706 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/7b6f1d40-6ea7-4fd7-bb6f-c2603bf70807-profile-collector-cert\") pod \"catalog-operator-68c6474976-cfspd\" (UID: \"7b6f1d40-6ea7-4fd7-bb6f-c2603bf70807\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-cfspd" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.687773 
4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1d09a8d4-1225-4b5a-94ee-149b7c0fe01e-trusted-ca\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.687791 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/1d09a8d4-1225-4b5a-94ee-149b7c0fe01e-bound-sa-token\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:12 crc kubenswrapper[4949]: E0216 11:09:12.688495 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-16 11:09:13.188475629 +0000 UTC m=+142.817809794 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzx7x" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.693042 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-j5268" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.694862 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-rk65d" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.771418 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-5nxhr"] Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.789967 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.790294 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/5bc6fac3-953a-49e0-8743-d36684d2dfb6-metrics-tls\") pod \"dns-default-5pbl4\" (UID: \"5bc6fac3-953a-49e0-8743-d36684d2dfb6\") " pod="openshift-dns/dns-default-5pbl4" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.790361 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/dc6af30d-b0c6-47f1-90d2-9d297a7d2b23-secret-volume\") pod \"collect-profiles-29520660-45x59\" (UID: \"dc6af30d-b0c6-47f1-90d2-9d297a7d2b23\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520660-45x59" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.790412 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/f1e26f51-c378-4a4c-ac9c-d6179acf86ca-proxy-tls\") pod \"machine-config-controller-84d6567774-clst2\" (UID: \"f1e26f51-c378-4a4c-ac9c-d6179acf86ca\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-clst2" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.790535 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/7b6f1d40-6ea7-4fd7-bb6f-c2603bf70807-profile-collector-cert\") pod \"catalog-operator-68c6474976-cfspd\" (UID: \"7b6f1d40-6ea7-4fd7-bb6f-c2603bf70807\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-cfspd" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.790564 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ctz5q\" (UniqueName: \"kubernetes.io/projected/f50a54ec-5563-4d56-8639-86a6003e0b0e-kube-api-access-ctz5q\") pod \"marketplace-operator-79b997595-khb7r\" (UID: \"f50a54ec-5563-4d56-8639-86a6003e0b0e\") " pod="openshift-marketplace/marketplace-operator-79b997595-khb7r" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.790589 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5bc6fac3-953a-49e0-8743-d36684d2dfb6-config-volume\") pod \"dns-default-5pbl4\" (UID: \"5bc6fac3-953a-49e0-8743-d36684d2dfb6\") " pod="openshift-dns/dns-default-5pbl4" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.790619 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/d88b1cf0-2d49-4e1c-a704-ae635bf17d79-node-bootstrap-token\") pod \"machine-config-server-s8znm\" (UID: 
\"d88b1cf0-2d49-4e1c-a704-ae635bf17d79\") " pod="openshift-machine-config-operator/machine-config-server-s8znm" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.790702 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nbvnr\" (UniqueName: \"kubernetes.io/projected/d88b1cf0-2d49-4e1c-a704-ae635bf17d79-kube-api-access-nbvnr\") pod \"machine-config-server-s8znm\" (UID: \"d88b1cf0-2d49-4e1c-a704-ae635bf17d79\") " pod="openshift-machine-config-operator/machine-config-server-s8znm" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.790725 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/9f714671-859d-44dc-ad1a-30068d61639c-profile-collector-cert\") pod \"olm-operator-6b444d44fb-rrm98\" (UID: \"9f714671-859d-44dc-ad1a-30068d61639c\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-rrm98" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.790794 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/084fa2ba-3f08-4dae-9e15-2f582f6d3ca7-tmpfs\") pod \"packageserver-d55dfcdfc-l7qdl\" (UID: \"084fa2ba-3f08-4dae-9e15-2f582f6d3ca7\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-l7qdl" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.790850 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-682n5\" (UniqueName: \"kubernetes.io/projected/dc6af30d-b0c6-47f1-90d2-9d297a7d2b23-kube-api-access-682n5\") pod \"collect-profiles-29520660-45x59\" (UID: \"dc6af30d-b0c6-47f1-90d2-9d297a7d2b23\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520660-45x59" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.790887 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/d88b1cf0-2d49-4e1c-a704-ae635bf17d79-certs\") pod \"machine-config-server-s8znm\" (UID: \"d88b1cf0-2d49-4e1c-a704-ae635bf17d79\") " pod="openshift-machine-config-operator/machine-config-server-s8znm" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.790918 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tk9pf\" (UniqueName: \"kubernetes.io/projected/c8cc1f0d-e65b-40f0-8e9b-d963c82b2bd4-kube-api-access-tk9pf\") pod \"migrator-59844c95c7-5xpfr\" (UID: \"c8cc1f0d-e65b-40f0-8e9b-d963c82b2bd4\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-5xpfr" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.791024 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/9f714671-859d-44dc-ad1a-30068d61639c-srv-cert\") pod \"olm-operator-6b444d44fb-rrm98\" (UID: \"9f714671-859d-44dc-ad1a-30068d61639c\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-rrm98" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.791047 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d4759ca8-9ff2-4e1e-a974-880caceeebb6-serving-cert\") pod \"service-ca-operator-777779d784-56tdr\" (UID: \"d4759ca8-9ff2-4e1e-a974-880caceeebb6\") " 
pod="openshift-service-ca-operator/service-ca-operator-777779d784-56tdr" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.791232 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/bd39da7f-e838-4de5-9fcb-afb858bfe8ca-images\") pod \"machine-api-operator-5694c8668f-9ddvv\" (UID: \"bd39da7f-e838-4de5-9fcb-afb858bfe8ca\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-9ddvv" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.791261 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2ktts\" (UniqueName: \"kubernetes.io/projected/084fa2ba-3f08-4dae-9e15-2f582f6d3ca7-kube-api-access-2ktts\") pod \"packageserver-d55dfcdfc-l7qdl\" (UID: \"084fa2ba-3f08-4dae-9e15-2f582f6d3ca7\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-l7qdl" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.791318 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd39da7f-e838-4de5-9fcb-afb858bfe8ca-config\") pod \"machine-api-operator-5694c8668f-9ddvv\" (UID: \"bd39da7f-e838-4de5-9fcb-afb858bfe8ca\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-9ddvv" Feb 16 11:09:12 crc kubenswrapper[4949]: E0216 11:09:12.791357 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:13.291322342 +0000 UTC m=+142.920656507 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.791432 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1d09a8d4-1225-4b5a-94ee-149b7c0fe01e-trusted-ca\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.791459 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/1d09a8d4-1225-4b5a-94ee-149b7c0fe01e-bound-sa-token\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.791510 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/f1e26f51-c378-4a4c-ac9c-d6179acf86ca-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-clst2\" (UID: \"f1e26f51-c378-4a4c-ac9c-d6179acf86ca\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-clst2" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 
11:09:12.791531 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k9x6c\" (UniqueName: \"kubernetes.io/projected/7aa8aba9-4384-470b-ab1b-d8df8efc889d-kube-api-access-k9x6c\") pod \"ingress-canary-22nsc\" (UID: \"7aa8aba9-4384-470b-ab1b-d8df8efc889d\") " pod="openshift-ingress-canary/ingress-canary-22nsc" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.791584 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/ce26720c-05cf-4162-942f-8b94e1bcd43e-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-zdcp8\" (UID: \"ce26720c-05cf-4162-942f-8b94e1bcd43e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-zdcp8" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.791643 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c7p7h\" (UniqueName: \"kubernetes.io/projected/ce26720c-05cf-4162-942f-8b94e1bcd43e-kube-api-access-c7p7h\") pod \"multus-admission-controller-857f4d67dd-zdcp8\" (UID: \"ce26720c-05cf-4162-942f-8b94e1bcd43e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-zdcp8" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.791701 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/7ae00ecc-93bf-4b26-b6f8-3dfa67a511bf-socket-dir\") pod \"csi-hostpathplugin-zsl98\" (UID: \"7ae00ecc-93bf-4b26-b6f8-3dfa67a511bf\") " pod="hostpath-provisioner/csi-hostpathplugin-zsl98" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.791766 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/bd39da7f-e838-4de5-9fcb-afb858bfe8ca-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-9ddvv\" (UID: \"bd39da7f-e838-4de5-9fcb-afb858bfe8ca\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-9ddvv" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.791818 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/31b08225-3ae9-44c9-bcb5-7f34a7b6a969-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-cngx5\" (UID: \"31b08225-3ae9-44c9-bcb5-7f34a7b6a969\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cngx5" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.791854 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/31b08225-3ae9-44c9-bcb5-7f34a7b6a969-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-cngx5\" (UID: \"31b08225-3ae9-44c9-bcb5-7f34a7b6a969\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cngx5" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.791873 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-crcd5\" (UniqueName: \"kubernetes.io/projected/7b6f1d40-6ea7-4fd7-bb6f-c2603bf70807-kube-api-access-crcd5\") pod \"catalog-operator-68c6474976-cfspd\" (UID: \"7b6f1d40-6ea7-4fd7-bb6f-c2603bf70807\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-cfspd" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.791896 4949 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mtdm9\" (UniqueName: \"kubernetes.io/projected/277750ca-5f21-4e77-b882-4b79b2ca5932-kube-api-access-mtdm9\") pod \"kube-storage-version-migrator-operator-b67b599dd-8cgj5\" (UID: \"277750ca-5f21-4e77-b882-4b79b2ca5932\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8cgj5" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.791932 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/277750ca-5f21-4e77-b882-4b79b2ca5932-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-8cgj5\" (UID: \"277750ca-5f21-4e77-b882-4b79b2ca5932\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8cgj5" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.791948 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/7ae00ecc-93bf-4b26-b6f8-3dfa67a511bf-registration-dir\") pod \"csi-hostpathplugin-zsl98\" (UID: \"7ae00ecc-93bf-4b26-b6f8-3dfa67a511bf\") " pod="hostpath-provisioner/csi-hostpathplugin-zsl98" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.791975 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4759ca8-9ff2-4e1e-a974-880caceeebb6-config\") pod \"service-ca-operator-777779d784-56tdr\" (UID: \"d4759ca8-9ff2-4e1e-a974-880caceeebb6\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-56tdr" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.792028 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dw5wp\" (UniqueName: \"kubernetes.io/projected/d4759ca8-9ff2-4e1e-a974-880caceeebb6-kube-api-access-dw5wp\") pod \"service-ca-operator-777779d784-56tdr\" (UID: \"d4759ca8-9ff2-4e1e-a974-880caceeebb6\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-56tdr" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.792048 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hfbqg\" (UniqueName: \"kubernetes.io/projected/20a94b4e-2cd0-430e-9f2f-e805706f3b3d-kube-api-access-hfbqg\") pod \"control-plane-machine-set-operator-78cbb6b69f-nnrft\" (UID: \"20a94b4e-2cd0-430e-9f2f-e805706f3b3d\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nnrft" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.792075 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-89rxh\" (UniqueName: \"kubernetes.io/projected/5bc6fac3-953a-49e0-8743-d36684d2dfb6-kube-api-access-89rxh\") pod \"dns-default-5pbl4\" (UID: \"5bc6fac3-953a-49e0-8743-d36684d2dfb6\") " pod="openshift-dns/dns-default-5pbl4" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.792095 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/dc6af30d-b0c6-47f1-90d2-9d297a7d2b23-config-volume\") pod \"collect-profiles-29520660-45x59\" (UID: \"dc6af30d-b0c6-47f1-90d2-9d297a7d2b23\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520660-45x59" Feb 16 11:09:12 crc 
kubenswrapper[4949]: I0216 11:09:12.792164 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/a8447a79-6195-4650-95c9-4c0c4207133e-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-l9xzk\" (UID: \"a8447a79-6195-4650-95c9-4c0c4207133e\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-l9xzk" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.792254 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/1d09a8d4-1225-4b5a-94ee-149b7c0fe01e-ca-trust-extracted\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.792278 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/7ae00ecc-93bf-4b26-b6f8-3dfa67a511bf-mountpoint-dir\") pod \"csi-hostpathplugin-zsl98\" (UID: \"7ae00ecc-93bf-4b26-b6f8-3dfa67a511bf\") " pod="hostpath-provisioner/csi-hostpathplugin-zsl98" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.792295 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s94m6\" (UniqueName: \"kubernetes.io/projected/9f714671-859d-44dc-ad1a-30068d61639c-kube-api-access-s94m6\") pod \"olm-operator-6b444d44fb-rrm98\" (UID: \"9f714671-859d-44dc-ad1a-30068d61639c\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-rrm98" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.792314 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/20a94b4e-2cd0-430e-9f2f-e805706f3b3d-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-nnrft\" (UID: \"20a94b4e-2cd0-430e-9f2f-e805706f3b3d\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nnrft" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.792347 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/f50a54ec-5563-4d56-8639-86a6003e0b0e-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-khb7r\" (UID: \"f50a54ec-5563-4d56-8639-86a6003e0b0e\") " pod="openshift-marketplace/marketplace-operator-79b997595-khb7r" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.792391 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/084fa2ba-3f08-4dae-9e15-2f582f6d3ca7-apiservice-cert\") pod \"packageserver-d55dfcdfc-l7qdl\" (UID: \"084fa2ba-3f08-4dae-9e15-2f582f6d3ca7\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-l7qdl" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.792410 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4bzvd\" (UniqueName: \"kubernetes.io/projected/e264d3a3-9add-4493-befe-ad59a40f5e5f-kube-api-access-4bzvd\") pod \"service-ca-9c57cc56f-vwhm9\" (UID: \"e264d3a3-9add-4493-befe-ad59a40f5e5f\") " 
pod="openshift-service-ca/service-ca-9c57cc56f-vwhm9" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.792473 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/1d09a8d4-1225-4b5a-94ee-149b7c0fe01e-installation-pull-secrets\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.792494 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/7ae00ecc-93bf-4b26-b6f8-3dfa67a511bf-csi-data-dir\") pod \"csi-hostpathplugin-zsl98\" (UID: \"7ae00ecc-93bf-4b26-b6f8-3dfa67a511bf\") " pod="hostpath-provisioner/csi-hostpathplugin-zsl98" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.792510 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f8msc\" (UniqueName: \"kubernetes.io/projected/7ae00ecc-93bf-4b26-b6f8-3dfa67a511bf-kube-api-access-f8msc\") pod \"csi-hostpathplugin-zsl98\" (UID: \"7ae00ecc-93bf-4b26-b6f8-3dfa67a511bf\") " pod="hostpath-provisioner/csi-hostpathplugin-zsl98" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.792574 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/1d09a8d4-1225-4b5a-94ee-149b7c0fe01e-registry-certificates\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.792657 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/7ae00ecc-93bf-4b26-b6f8-3dfa67a511bf-plugins-dir\") pod \"csi-hostpathplugin-zsl98\" (UID: \"7ae00ecc-93bf-4b26-b6f8-3dfa67a511bf\") " pod="hostpath-provisioner/csi-hostpathplugin-zsl98" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.792686 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7aa8aba9-4384-470b-ab1b-d8df8efc889d-cert\") pod \"ingress-canary-22nsc\" (UID: \"7aa8aba9-4384-470b-ab1b-d8df8efc889d\") " pod="openshift-ingress-canary/ingress-canary-22nsc" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.792810 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5xv55\" (UniqueName: \"kubernetes.io/projected/1d09a8d4-1225-4b5a-94ee-149b7c0fe01e-kube-api-access-5xv55\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.792841 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bq757\" (UniqueName: \"kubernetes.io/projected/f1e26f51-c378-4a4c-ac9c-d6179acf86ca-kube-api-access-bq757\") pod \"machine-config-controller-84d6567774-clst2\" (UID: \"f1e26f51-c378-4a4c-ac9c-d6179acf86ca\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-clst2" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.792867 4949 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/e264d3a3-9add-4493-befe-ad59a40f5e5f-signing-cabundle\") pod \"service-ca-9c57cc56f-vwhm9\" (UID: \"e264d3a3-9add-4493-befe-ad59a40f5e5f\") " pod="openshift-service-ca/service-ca-9c57cc56f-vwhm9" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.792941 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/7b6f1d40-6ea7-4fd7-bb6f-c2603bf70807-srv-cert\") pod \"catalog-operator-68c6474976-cfspd\" (UID: \"7b6f1d40-6ea7-4fd7-bb6f-c2603bf70807\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-cfspd" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.792968 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/277750ca-5f21-4e77-b882-4b79b2ca5932-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-8cgj5\" (UID: \"277750ca-5f21-4e77-b882-4b79b2ca5932\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8cgj5" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.792996 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/e264d3a3-9add-4493-befe-ad59a40f5e5f-signing-key\") pod \"service-ca-9c57cc56f-vwhm9\" (UID: \"e264d3a3-9add-4493-befe-ad59a40f5e5f\") " pod="openshift-service-ca/service-ca-9c57cc56f-vwhm9" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.793076 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f50a54ec-5563-4d56-8639-86a6003e0b0e-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-khb7r\" (UID: \"f50a54ec-5563-4d56-8639-86a6003e0b0e\") " pod="openshift-marketplace/marketplace-operator-79b997595-khb7r" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.793145 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/084fa2ba-3f08-4dae-9e15-2f582f6d3ca7-webhook-cert\") pod \"packageserver-d55dfcdfc-l7qdl\" (UID: \"084fa2ba-3f08-4dae-9e15-2f582f6d3ca7\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-l7qdl" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.793225 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mfhrk\" (UniqueName: \"kubernetes.io/projected/bd39da7f-e838-4de5-9fcb-afb858bfe8ca-kube-api-access-mfhrk\") pod \"machine-api-operator-5694c8668f-9ddvv\" (UID: \"bd39da7f-e838-4de5-9fcb-afb858bfe8ca\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-9ddvv" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.793254 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/1d09a8d4-1225-4b5a-94ee-149b7c0fe01e-registry-tls\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.793279 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/31b08225-3ae9-44c9-bcb5-7f34a7b6a969-config\") pod \"kube-apiserver-operator-766d6c64bb-cngx5\" (UID: \"31b08225-3ae9-44c9-bcb5-7f34a7b6a969\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cngx5" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.793328 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.793357 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8pr9n\" (UniqueName: \"kubernetes.io/projected/a8447a79-6195-4650-95c9-4c0c4207133e-kube-api-access-8pr9n\") pod \"package-server-manager-789f6589d5-l9xzk\" (UID: \"a8447a79-6195-4650-95c9-4c0c4207133e\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-l9xzk" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.815248 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/1d09a8d4-1225-4b5a-94ee-149b7c0fe01e-ca-trust-extracted\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.817158 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/7b6f1d40-6ea7-4fd7-bb6f-c2603bf70807-profile-collector-cert\") pod \"catalog-operator-68c6474976-cfspd\" (UID: \"7b6f1d40-6ea7-4fd7-bb6f-c2603bf70807\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-cfspd" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.817945 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/1d09a8d4-1225-4b5a-94ee-149b7c0fe01e-registry-certificates\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:12 crc kubenswrapper[4949]: E0216 11:09:12.821030 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-16 11:09:13.321007213 +0000 UTC m=+142.950341378 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzx7x" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.833195 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/1d09a8d4-1225-4b5a-94ee-149b7c0fe01e-installation-pull-secrets\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.840852 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/31b08225-3ae9-44c9-bcb5-7f34a7b6a969-config\") pod \"kube-apiserver-operator-766d6c64bb-cngx5\" (UID: \"31b08225-3ae9-44c9-bcb5-7f34a7b6a969\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cngx5" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.862000 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1d09a8d4-1225-4b5a-94ee-149b7c0fe01e-trusted-ca\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.871682 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/31b08225-3ae9-44c9-bcb5-7f34a7b6a969-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-cngx5\" (UID: \"31b08225-3ae9-44c9-bcb5-7f34a7b6a969\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cngx5" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.878356 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/7b6f1d40-6ea7-4fd7-bb6f-c2603bf70807-srv-cert\") pod \"catalog-operator-68c6474976-cfspd\" (UID: \"7b6f1d40-6ea7-4fd7-bb6f-c2603bf70807\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-cfspd" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.892845 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/1d09a8d4-1225-4b5a-94ee-149b7c0fe01e-registry-tls\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.896309 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.896567 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: 
\"kubernetes.io/host-path/7ae00ecc-93bf-4b26-b6f8-3dfa67a511bf-mountpoint-dir\") pod \"csi-hostpathplugin-zsl98\" (UID: \"7ae00ecc-93bf-4b26-b6f8-3dfa67a511bf\") " pod="hostpath-provisioner/csi-hostpathplugin-zsl98" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.896603 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s94m6\" (UniqueName: \"kubernetes.io/projected/9f714671-859d-44dc-ad1a-30068d61639c-kube-api-access-s94m6\") pod \"olm-operator-6b444d44fb-rrm98\" (UID: \"9f714671-859d-44dc-ad1a-30068d61639c\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-rrm98" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.896628 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/20a94b4e-2cd0-430e-9f2f-e805706f3b3d-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-nnrft\" (UID: \"20a94b4e-2cd0-430e-9f2f-e805706f3b3d\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nnrft" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.896652 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/f50a54ec-5563-4d56-8639-86a6003e0b0e-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-khb7r\" (UID: \"f50a54ec-5563-4d56-8639-86a6003e0b0e\") " pod="openshift-marketplace/marketplace-operator-79b997595-khb7r" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.896672 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/084fa2ba-3f08-4dae-9e15-2f582f6d3ca7-apiservice-cert\") pod \"packageserver-d55dfcdfc-l7qdl\" (UID: \"084fa2ba-3f08-4dae-9e15-2f582f6d3ca7\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-l7qdl" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.896698 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4bzvd\" (UniqueName: \"kubernetes.io/projected/e264d3a3-9add-4493-befe-ad59a40f5e5f-kube-api-access-4bzvd\") pod \"service-ca-9c57cc56f-vwhm9\" (UID: \"e264d3a3-9add-4493-befe-ad59a40f5e5f\") " pod="openshift-service-ca/service-ca-9c57cc56f-vwhm9" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.896727 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/7ae00ecc-93bf-4b26-b6f8-3dfa67a511bf-csi-data-dir\") pod \"csi-hostpathplugin-zsl98\" (UID: \"7ae00ecc-93bf-4b26-b6f8-3dfa67a511bf\") " pod="hostpath-provisioner/csi-hostpathplugin-zsl98" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.896756 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f8msc\" (UniqueName: \"kubernetes.io/projected/7ae00ecc-93bf-4b26-b6f8-3dfa67a511bf-kube-api-access-f8msc\") pod \"csi-hostpathplugin-zsl98\" (UID: \"7ae00ecc-93bf-4b26-b6f8-3dfa67a511bf\") " pod="hostpath-provisioner/csi-hostpathplugin-zsl98" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.896779 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/7ae00ecc-93bf-4b26-b6f8-3dfa67a511bf-plugins-dir\") pod \"csi-hostpathplugin-zsl98\" (UID: \"7ae00ecc-93bf-4b26-b6f8-3dfa67a511bf\") " 
pod="hostpath-provisioner/csi-hostpathplugin-zsl98" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.896801 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7aa8aba9-4384-470b-ab1b-d8df8efc889d-cert\") pod \"ingress-canary-22nsc\" (UID: \"7aa8aba9-4384-470b-ab1b-d8df8efc889d\") " pod="openshift-ingress-canary/ingress-canary-22nsc" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.896847 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/e264d3a3-9add-4493-befe-ad59a40f5e5f-signing-cabundle\") pod \"service-ca-9c57cc56f-vwhm9\" (UID: \"e264d3a3-9add-4493-befe-ad59a40f5e5f\") " pod="openshift-service-ca/service-ca-9c57cc56f-vwhm9" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.896872 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bq757\" (UniqueName: \"kubernetes.io/projected/f1e26f51-c378-4a4c-ac9c-d6179acf86ca-kube-api-access-bq757\") pod \"machine-config-controller-84d6567774-clst2\" (UID: \"f1e26f51-c378-4a4c-ac9c-d6179acf86ca\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-clst2" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.896895 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/277750ca-5f21-4e77-b882-4b79b2ca5932-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-8cgj5\" (UID: \"277750ca-5f21-4e77-b882-4b79b2ca5932\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8cgj5" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.896915 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/e264d3a3-9add-4493-befe-ad59a40f5e5f-signing-key\") pod \"service-ca-9c57cc56f-vwhm9\" (UID: \"e264d3a3-9add-4493-befe-ad59a40f5e5f\") " pod="openshift-service-ca/service-ca-9c57cc56f-vwhm9" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.896943 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f50a54ec-5563-4d56-8639-86a6003e0b0e-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-khb7r\" (UID: \"f50a54ec-5563-4d56-8639-86a6003e0b0e\") " pod="openshift-marketplace/marketplace-operator-79b997595-khb7r" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.896966 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/084fa2ba-3f08-4dae-9e15-2f582f6d3ca7-webhook-cert\") pod \"packageserver-d55dfcdfc-l7qdl\" (UID: \"084fa2ba-3f08-4dae-9e15-2f582f6d3ca7\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-l7qdl" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.896997 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mfhrk\" (UniqueName: \"kubernetes.io/projected/bd39da7f-e838-4de5-9fcb-afb858bfe8ca-kube-api-access-mfhrk\") pod \"machine-api-operator-5694c8668f-9ddvv\" (UID: \"bd39da7f-e838-4de5-9fcb-afb858bfe8ca\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-9ddvv" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.897030 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-8pr9n\" (UniqueName: \"kubernetes.io/projected/a8447a79-6195-4650-95c9-4c0c4207133e-kube-api-access-8pr9n\") pod \"package-server-manager-789f6589d5-l9xzk\" (UID: \"a8447a79-6195-4650-95c9-4c0c4207133e\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-l9xzk" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.897056 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/5bc6fac3-953a-49e0-8743-d36684d2dfb6-metrics-tls\") pod \"dns-default-5pbl4\" (UID: \"5bc6fac3-953a-49e0-8743-d36684d2dfb6\") " pod="openshift-dns/dns-default-5pbl4" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.897083 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/dc6af30d-b0c6-47f1-90d2-9d297a7d2b23-secret-volume\") pod \"collect-profiles-29520660-45x59\" (UID: \"dc6af30d-b0c6-47f1-90d2-9d297a7d2b23\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520660-45x59" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.897108 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/f1e26f51-c378-4a4c-ac9c-d6179acf86ca-proxy-tls\") pod \"machine-config-controller-84d6567774-clst2\" (UID: \"f1e26f51-c378-4a4c-ac9c-d6179acf86ca\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-clst2" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.897136 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ctz5q\" (UniqueName: \"kubernetes.io/projected/f50a54ec-5563-4d56-8639-86a6003e0b0e-kube-api-access-ctz5q\") pod \"marketplace-operator-79b997595-khb7r\" (UID: \"f50a54ec-5563-4d56-8639-86a6003e0b0e\") " pod="openshift-marketplace/marketplace-operator-79b997595-khb7r" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.897165 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/d88b1cf0-2d49-4e1c-a704-ae635bf17d79-node-bootstrap-token\") pod \"machine-config-server-s8znm\" (UID: \"d88b1cf0-2d49-4e1c-a704-ae635bf17d79\") " pod="openshift-machine-config-operator/machine-config-server-s8znm" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.897223 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5bc6fac3-953a-49e0-8743-d36684d2dfb6-config-volume\") pod \"dns-default-5pbl4\" (UID: \"5bc6fac3-953a-49e0-8743-d36684d2dfb6\") " pod="openshift-dns/dns-default-5pbl4" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.897245 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nbvnr\" (UniqueName: \"kubernetes.io/projected/d88b1cf0-2d49-4e1c-a704-ae635bf17d79-kube-api-access-nbvnr\") pod \"machine-config-server-s8znm\" (UID: \"d88b1cf0-2d49-4e1c-a704-ae635bf17d79\") " pod="openshift-machine-config-operator/machine-config-server-s8znm" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.897267 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/9f714671-859d-44dc-ad1a-30068d61639c-profile-collector-cert\") pod \"olm-operator-6b444d44fb-rrm98\" (UID: \"9f714671-859d-44dc-ad1a-30068d61639c\") " 
pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-rrm98" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.897293 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/084fa2ba-3f08-4dae-9e15-2f582f6d3ca7-tmpfs\") pod \"packageserver-d55dfcdfc-l7qdl\" (UID: \"084fa2ba-3f08-4dae-9e15-2f582f6d3ca7\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-l7qdl" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.897315 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-682n5\" (UniqueName: \"kubernetes.io/projected/dc6af30d-b0c6-47f1-90d2-9d297a7d2b23-kube-api-access-682n5\") pod \"collect-profiles-29520660-45x59\" (UID: \"dc6af30d-b0c6-47f1-90d2-9d297a7d2b23\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520660-45x59" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.897335 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tk9pf\" (UniqueName: \"kubernetes.io/projected/c8cc1f0d-e65b-40f0-8e9b-d963c82b2bd4-kube-api-access-tk9pf\") pod \"migrator-59844c95c7-5xpfr\" (UID: \"c8cc1f0d-e65b-40f0-8e9b-d963c82b2bd4\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-5xpfr" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.897351 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/d88b1cf0-2d49-4e1c-a704-ae635bf17d79-certs\") pod \"machine-config-server-s8znm\" (UID: \"d88b1cf0-2d49-4e1c-a704-ae635bf17d79\") " pod="openshift-machine-config-operator/machine-config-server-s8znm" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.897372 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/9f714671-859d-44dc-ad1a-30068d61639c-srv-cert\") pod \"olm-operator-6b444d44fb-rrm98\" (UID: \"9f714671-859d-44dc-ad1a-30068d61639c\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-rrm98" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.897393 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d4759ca8-9ff2-4e1e-a974-880caceeebb6-serving-cert\") pod \"service-ca-operator-777779d784-56tdr\" (UID: \"d4759ca8-9ff2-4e1e-a974-880caceeebb6\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-56tdr" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.897423 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/bd39da7f-e838-4de5-9fcb-afb858bfe8ca-images\") pod \"machine-api-operator-5694c8668f-9ddvv\" (UID: \"bd39da7f-e838-4de5-9fcb-afb858bfe8ca\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-9ddvv" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.897441 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2ktts\" (UniqueName: \"kubernetes.io/projected/084fa2ba-3f08-4dae-9e15-2f582f6d3ca7-kube-api-access-2ktts\") pod \"packageserver-d55dfcdfc-l7qdl\" (UID: \"084fa2ba-3f08-4dae-9e15-2f582f6d3ca7\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-l7qdl" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.897461 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/bd39da7f-e838-4de5-9fcb-afb858bfe8ca-config\") pod \"machine-api-operator-5694c8668f-9ddvv\" (UID: \"bd39da7f-e838-4de5-9fcb-afb858bfe8ca\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-9ddvv" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.897487 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/f1e26f51-c378-4a4c-ac9c-d6179acf86ca-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-clst2\" (UID: \"f1e26f51-c378-4a4c-ac9c-d6179acf86ca\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-clst2" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.897503 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k9x6c\" (UniqueName: \"kubernetes.io/projected/7aa8aba9-4384-470b-ab1b-d8df8efc889d-kube-api-access-k9x6c\") pod \"ingress-canary-22nsc\" (UID: \"7aa8aba9-4384-470b-ab1b-d8df8efc889d\") " pod="openshift-ingress-canary/ingress-canary-22nsc" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.897521 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/ce26720c-05cf-4162-942f-8b94e1bcd43e-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-zdcp8\" (UID: \"ce26720c-05cf-4162-942f-8b94e1bcd43e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-zdcp8" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.897543 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c7p7h\" (UniqueName: \"kubernetes.io/projected/ce26720c-05cf-4162-942f-8b94e1bcd43e-kube-api-access-c7p7h\") pod \"multus-admission-controller-857f4d67dd-zdcp8\" (UID: \"ce26720c-05cf-4162-942f-8b94e1bcd43e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-zdcp8" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.897560 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/7ae00ecc-93bf-4b26-b6f8-3dfa67a511bf-socket-dir\") pod \"csi-hostpathplugin-zsl98\" (UID: \"7ae00ecc-93bf-4b26-b6f8-3dfa67a511bf\") " pod="hostpath-provisioner/csi-hostpathplugin-zsl98" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.897578 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/bd39da7f-e838-4de5-9fcb-afb858bfe8ca-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-9ddvv\" (UID: \"bd39da7f-e838-4de5-9fcb-afb858bfe8ca\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-9ddvv" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.897606 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mtdm9\" (UniqueName: \"kubernetes.io/projected/277750ca-5f21-4e77-b882-4b79b2ca5932-kube-api-access-mtdm9\") pod \"kube-storage-version-migrator-operator-b67b599dd-8cgj5\" (UID: \"277750ca-5f21-4e77-b882-4b79b2ca5932\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8cgj5" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.897630 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/277750ca-5f21-4e77-b882-4b79b2ca5932-serving-cert\") pod 
\"kube-storage-version-migrator-operator-b67b599dd-8cgj5\" (UID: \"277750ca-5f21-4e77-b882-4b79b2ca5932\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8cgj5" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.897647 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/7ae00ecc-93bf-4b26-b6f8-3dfa67a511bf-registration-dir\") pod \"csi-hostpathplugin-zsl98\" (UID: \"7ae00ecc-93bf-4b26-b6f8-3dfa67a511bf\") " pod="hostpath-provisioner/csi-hostpathplugin-zsl98" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.897673 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dw5wp\" (UniqueName: \"kubernetes.io/projected/d4759ca8-9ff2-4e1e-a974-880caceeebb6-kube-api-access-dw5wp\") pod \"service-ca-operator-777779d784-56tdr\" (UID: \"d4759ca8-9ff2-4e1e-a974-880caceeebb6\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-56tdr" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.897689 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hfbqg\" (UniqueName: \"kubernetes.io/projected/20a94b4e-2cd0-430e-9f2f-e805706f3b3d-kube-api-access-hfbqg\") pod \"control-plane-machine-set-operator-78cbb6b69f-nnrft\" (UID: \"20a94b4e-2cd0-430e-9f2f-e805706f3b3d\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nnrft" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.897712 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4759ca8-9ff2-4e1e-a974-880caceeebb6-config\") pod \"service-ca-operator-777779d784-56tdr\" (UID: \"d4759ca8-9ff2-4e1e-a974-880caceeebb6\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-56tdr" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.897739 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/dc6af30d-b0c6-47f1-90d2-9d297a7d2b23-config-volume\") pod \"collect-profiles-29520660-45x59\" (UID: \"dc6af30d-b0c6-47f1-90d2-9d297a7d2b23\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520660-45x59" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.897769 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-89rxh\" (UniqueName: \"kubernetes.io/projected/5bc6fac3-953a-49e0-8743-d36684d2dfb6-kube-api-access-89rxh\") pod \"dns-default-5pbl4\" (UID: \"5bc6fac3-953a-49e0-8743-d36684d2dfb6\") " pod="openshift-dns/dns-default-5pbl4" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.897795 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/a8447a79-6195-4650-95c9-4c0c4207133e-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-l9xzk\" (UID: \"a8447a79-6195-4650-95c9-4c0c4207133e\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-l9xzk" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.898538 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/7ae00ecc-93bf-4b26-b6f8-3dfa67a511bf-csi-data-dir\") pod \"csi-hostpathplugin-zsl98\" (UID: \"7ae00ecc-93bf-4b26-b6f8-3dfa67a511bf\") " 
pod="hostpath-provisioner/csi-hostpathplugin-zsl98" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.899316 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/bd39da7f-e838-4de5-9fcb-afb858bfe8ca-images\") pod \"machine-api-operator-5694c8668f-9ddvv\" (UID: \"bd39da7f-e838-4de5-9fcb-afb858bfe8ca\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-9ddvv" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.900887 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/7ae00ecc-93bf-4b26-b6f8-3dfa67a511bf-mountpoint-dir\") pod \"csi-hostpathplugin-zsl98\" (UID: \"7ae00ecc-93bf-4b26-b6f8-3dfa67a511bf\") " pod="hostpath-provisioner/csi-hostpathplugin-zsl98" Feb 16 11:09:12 crc kubenswrapper[4949]: E0216 11:09:12.901040 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:13.401013069 +0000 UTC m=+143.030347234 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.901847 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5bc6fac3-953a-49e0-8743-d36684d2dfb6-config-volume\") pod \"dns-default-5pbl4\" (UID: \"5bc6fac3-953a-49e0-8743-d36684d2dfb6\") " pod="openshift-dns/dns-default-5pbl4" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.903673 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd39da7f-e838-4de5-9fcb-afb858bfe8ca-config\") pod \"machine-api-operator-5694c8668f-9ddvv\" (UID: \"bd39da7f-e838-4de5-9fcb-afb858bfe8ca\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-9ddvv" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.904679 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/f1e26f51-c378-4a4c-ac9c-d6179acf86ca-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-clst2\" (UID: \"f1e26f51-c378-4a4c-ac9c-d6179acf86ca\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-clst2" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.905230 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/7ae00ecc-93bf-4b26-b6f8-3dfa67a511bf-socket-dir\") pod \"csi-hostpathplugin-zsl98\" (UID: \"7ae00ecc-93bf-4b26-b6f8-3dfa67a511bf\") " pod="hostpath-provisioner/csi-hostpathplugin-zsl98" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.906232 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/084fa2ba-3f08-4dae-9e15-2f582f6d3ca7-tmpfs\") pod \"packageserver-d55dfcdfc-l7qdl\" (UID: 
\"084fa2ba-3f08-4dae-9e15-2f582f6d3ca7\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-l7qdl" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.906438 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5xv55\" (UniqueName: \"kubernetes.io/projected/1d09a8d4-1225-4b5a-94ee-149b7c0fe01e-kube-api-access-5xv55\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.906815 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/7ae00ecc-93bf-4b26-b6f8-3dfa67a511bf-registration-dir\") pod \"csi-hostpathplugin-zsl98\" (UID: \"7ae00ecc-93bf-4b26-b6f8-3dfa67a511bf\") " pod="hostpath-provisioner/csi-hostpathplugin-zsl98" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.907140 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/1d09a8d4-1225-4b5a-94ee-149b7c0fe01e-bound-sa-token\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.907292 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/7ae00ecc-93bf-4b26-b6f8-3dfa67a511bf-plugins-dir\") pod \"csi-hostpathplugin-zsl98\" (UID: \"7ae00ecc-93bf-4b26-b6f8-3dfa67a511bf\") " pod="hostpath-provisioner/csi-hostpathplugin-zsl98" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.907600 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/e264d3a3-9add-4493-befe-ad59a40f5e5f-signing-cabundle\") pod \"service-ca-9c57cc56f-vwhm9\" (UID: \"e264d3a3-9add-4493-befe-ad59a40f5e5f\") " pod="openshift-service-ca/service-ca-9c57cc56f-vwhm9" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.908330 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/277750ca-5f21-4e77-b882-4b79b2ca5932-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-8cgj5\" (UID: \"277750ca-5f21-4e77-b882-4b79b2ca5932\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8cgj5" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.908496 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4759ca8-9ff2-4e1e-a974-880caceeebb6-config\") pod \"service-ca-operator-777779d784-56tdr\" (UID: \"d4759ca8-9ff2-4e1e-a974-880caceeebb6\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-56tdr" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.908920 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f50a54ec-5563-4d56-8639-86a6003e0b0e-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-khb7r\" (UID: \"f50a54ec-5563-4d56-8639-86a6003e0b0e\") " pod="openshift-marketplace/marketplace-operator-79b997595-khb7r" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.908920 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: 
\"kubernetes.io/projected/31b08225-3ae9-44c9-bcb5-7f34a7b6a969-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-cngx5\" (UID: \"31b08225-3ae9-44c9-bcb5-7f34a7b6a969\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cngx5" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.910549 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/dc6af30d-b0c6-47f1-90d2-9d297a7d2b23-config-volume\") pod \"collect-profiles-29520660-45x59\" (UID: \"dc6af30d-b0c6-47f1-90d2-9d297a7d2b23\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520660-45x59" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.911135 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/5bc6fac3-953a-49e0-8743-d36684d2dfb6-metrics-tls\") pod \"dns-default-5pbl4\" (UID: \"5bc6fac3-953a-49e0-8743-d36684d2dfb6\") " pod="openshift-dns/dns-default-5pbl4" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.914018 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7aa8aba9-4384-470b-ab1b-d8df8efc889d-cert\") pod \"ingress-canary-22nsc\" (UID: \"7aa8aba9-4384-470b-ab1b-d8df8efc889d\") " pod="openshift-ingress-canary/ingress-canary-22nsc" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.914312 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/d88b1cf0-2d49-4e1c-a704-ae635bf17d79-node-bootstrap-token\") pod \"machine-config-server-s8znm\" (UID: \"d88b1cf0-2d49-4e1c-a704-ae635bf17d79\") " pod="openshift-machine-config-operator/machine-config-server-s8znm" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.914614 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/f1e26f51-c378-4a4c-ac9c-d6179acf86ca-proxy-tls\") pod \"machine-config-controller-84d6567774-clst2\" (UID: \"f1e26f51-c378-4a4c-ac9c-d6179acf86ca\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-clst2" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.915058 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/a8447a79-6195-4650-95c9-4c0c4207133e-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-l9xzk\" (UID: \"a8447a79-6195-4650-95c9-4c0c4207133e\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-l9xzk" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.915279 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/dc6af30d-b0c6-47f1-90d2-9d297a7d2b23-secret-volume\") pod \"collect-profiles-29520660-45x59\" (UID: \"dc6af30d-b0c6-47f1-90d2-9d297a7d2b23\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520660-45x59" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.916357 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/9f714671-859d-44dc-ad1a-30068d61639c-profile-collector-cert\") pod \"olm-operator-6b444d44fb-rrm98\" (UID: \"9f714671-859d-44dc-ad1a-30068d61639c\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-rrm98" Feb 16 11:09:12 crc 
kubenswrapper[4949]: I0216 11:09:12.920409 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/d88b1cf0-2d49-4e1c-a704-ae635bf17d79-certs\") pod \"machine-config-server-s8znm\" (UID: \"d88b1cf0-2d49-4e1c-a704-ae635bf17d79\") " pod="openshift-machine-config-operator/machine-config-server-s8znm" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.920986 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/ce26720c-05cf-4162-942f-8b94e1bcd43e-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-zdcp8\" (UID: \"ce26720c-05cf-4162-942f-8b94e1bcd43e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-zdcp8" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.921834 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/277750ca-5f21-4e77-b882-4b79b2ca5932-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-8cgj5\" (UID: \"277750ca-5f21-4e77-b882-4b79b2ca5932\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8cgj5" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.933665 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/9f714671-859d-44dc-ad1a-30068d61639c-srv-cert\") pod \"olm-operator-6b444d44fb-rrm98\" (UID: \"9f714671-859d-44dc-ad1a-30068d61639c\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-rrm98" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.933666 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/20a94b4e-2cd0-430e-9f2f-e805706f3b3d-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-nnrft\" (UID: \"20a94b4e-2cd0-430e-9f2f-e805706f3b3d\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nnrft" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.934036 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/084fa2ba-3f08-4dae-9e15-2f582f6d3ca7-apiservice-cert\") pod \"packageserver-d55dfcdfc-l7qdl\" (UID: \"084fa2ba-3f08-4dae-9e15-2f582f6d3ca7\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-l7qdl" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.934639 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d4759ca8-9ff2-4e1e-a974-880caceeebb6-serving-cert\") pod \"service-ca-operator-777779d784-56tdr\" (UID: \"d4759ca8-9ff2-4e1e-a974-880caceeebb6\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-56tdr" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.936161 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/e264d3a3-9add-4493-befe-ad59a40f5e5f-signing-key\") pod \"service-ca-9c57cc56f-vwhm9\" (UID: \"e264d3a3-9add-4493-befe-ad59a40f5e5f\") " pod="openshift-service-ca/service-ca-9c57cc56f-vwhm9" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.937843 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: 
\"kubernetes.io/secret/bd39da7f-e838-4de5-9fcb-afb858bfe8ca-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-9ddvv\" (UID: \"bd39da7f-e838-4de5-9fcb-afb858bfe8ca\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-9ddvv" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.938128 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-crcd5\" (UniqueName: \"kubernetes.io/projected/7b6f1d40-6ea7-4fd7-bb6f-c2603bf70807-kube-api-access-crcd5\") pod \"catalog-operator-68c6474976-cfspd\" (UID: \"7b6f1d40-6ea7-4fd7-bb6f-c2603bf70807\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-cfspd" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.938502 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/084fa2ba-3f08-4dae-9e15-2f582f6d3ca7-webhook-cert\") pod \"packageserver-d55dfcdfc-l7qdl\" (UID: \"084fa2ba-3f08-4dae-9e15-2f582f6d3ca7\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-l7qdl" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.939087 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/f50a54ec-5563-4d56-8639-86a6003e0b0e-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-khb7r\" (UID: \"f50a54ec-5563-4d56-8639-86a6003e0b0e\") " pod="openshift-marketplace/marketplace-operator-79b997595-khb7r" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.944376 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-p2jx5"] Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.969107 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ctz5q\" (UniqueName: \"kubernetes.io/projected/f50a54ec-5563-4d56-8639-86a6003e0b0e-kube-api-access-ctz5q\") pod \"marketplace-operator-79b997595-khb7r\" (UID: \"f50a54ec-5563-4d56-8639-86a6003e0b0e\") " pod="openshift-marketplace/marketplace-operator-79b997595-khb7r" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.980616 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cngx5" Feb 16 11:09:12 crc kubenswrapper[4949]: I0216 11:09:12.999569 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:13 crc kubenswrapper[4949]: E0216 11:09:13.000155 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-16 11:09:13.500138106 +0000 UTC m=+143.129472281 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzx7x" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.003458 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-cfspd" Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.005335 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4bzvd\" (UniqueName: \"kubernetes.io/projected/e264d3a3-9add-4493-befe-ad59a40f5e5f-kube-api-access-4bzvd\") pod \"service-ca-9c57cc56f-vwhm9\" (UID: \"e264d3a3-9add-4493-befe-ad59a40f5e5f\") " pod="openshift-service-ca/service-ca-9c57cc56f-vwhm9" Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.009111 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-682n5\" (UniqueName: \"kubernetes.io/projected/dc6af30d-b0c6-47f1-90d2-9d297a7d2b23-kube-api-access-682n5\") pod \"collect-profiles-29520660-45x59\" (UID: \"dc6af30d-b0c6-47f1-90d2-9d297a7d2b23\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520660-45x59" Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.029286 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nbvnr\" (UniqueName: \"kubernetes.io/projected/d88b1cf0-2d49-4e1c-a704-ae635bf17d79-kube-api-access-nbvnr\") pod \"machine-config-server-s8znm\" (UID: \"d88b1cf0-2d49-4e1c-a704-ae635bf17d79\") " pod="openshift-machine-config-operator/machine-config-server-s8znm" Feb 16 11:09:13 crc kubenswrapper[4949]: W0216 11:09:13.033489 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0492d421_806b_48e4_8a97_3032888e370e.slice/crio-57e1e949c61f9c9df9a49e72f051dbbe968f57db9ef03c6eed54935eaea6adbd WatchSource:0}: Error finding container 57e1e949c61f9c9df9a49e72f051dbbe968f57db9ef03c6eed54935eaea6adbd: Status 404 returned error can't find the container with id 57e1e949c61f9c9df9a49e72f051dbbe968f57db9ef03c6eed54935eaea6adbd Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.061051 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2ktts\" (UniqueName: \"kubernetes.io/projected/084fa2ba-3f08-4dae-9e15-2f582f6d3ca7-kube-api-access-2ktts\") pod \"packageserver-d55dfcdfc-l7qdl\" (UID: \"084fa2ba-3f08-4dae-9e15-2f582f6d3ca7\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-l7qdl" Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.091715 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mtdm9\" (UniqueName: \"kubernetes.io/projected/277750ca-5f21-4e77-b882-4b79b2ca5932-kube-api-access-mtdm9\") pod \"kube-storage-version-migrator-operator-b67b599dd-8cgj5\" (UID: \"277750ca-5f21-4e77-b882-4b79b2ca5932\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8cgj5" Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.100264 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29520660-45x59" Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.100675 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.101124 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k9x6c\" (UniqueName: \"kubernetes.io/projected/7aa8aba9-4384-470b-ab1b-d8df8efc889d-kube-api-access-k9x6c\") pod \"ingress-canary-22nsc\" (UID: \"7aa8aba9-4384-470b-ab1b-d8df8efc889d\") " pod="openshift-ingress-canary/ingress-canary-22nsc" Feb 16 11:09:13 crc kubenswrapper[4949]: E0216 11:09:13.101783 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:13.601757471 +0000 UTC m=+143.231091836 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.122766 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c7p7h\" (UniqueName: \"kubernetes.io/projected/ce26720c-05cf-4162-942f-8b94e1bcd43e-kube-api-access-c7p7h\") pod \"multus-admission-controller-857f4d67dd-zdcp8\" (UID: \"ce26720c-05cf-4162-942f-8b94e1bcd43e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-zdcp8" Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.132916 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-l7qdl" Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.144159 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s94m6\" (UniqueName: \"kubernetes.io/projected/9f714671-859d-44dc-ad1a-30068d61639c-kube-api-access-s94m6\") pod \"olm-operator-6b444d44fb-rrm98\" (UID: \"9f714671-859d-44dc-ad1a-30068d61639c\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-rrm98" Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.147834 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-vwhm9" Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.165384 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-khb7r" Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.172839 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8cgj5" Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.178370 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8pr9n\" (UniqueName: \"kubernetes.io/projected/a8447a79-6195-4650-95c9-4c0c4207133e-kube-api-access-8pr9n\") pod \"package-server-manager-789f6589d5-l9xzk\" (UID: \"a8447a79-6195-4650-95c9-4c0c4207133e\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-l9xzk" Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.184814 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tk9pf\" (UniqueName: \"kubernetes.io/projected/c8cc1f0d-e65b-40f0-8e9b-d963c82b2bd4-kube-api-access-tk9pf\") pod \"migrator-59844c95c7-5xpfr\" (UID: \"c8cc1f0d-e65b-40f0-8e9b-d963c82b2bd4\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-5xpfr" Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.192047 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-22nsc" Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.202734 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bq757\" (UniqueName: \"kubernetes.io/projected/f1e26f51-c378-4a4c-ac9c-d6179acf86ca-kube-api-access-bq757\") pod \"machine-config-controller-84d6567774-clst2\" (UID: \"f1e26f51-c378-4a4c-ac9c-d6179acf86ca\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-clst2" Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.204900 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:13 crc kubenswrapper[4949]: E0216 11:09:13.205373 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-16 11:09:13.705355988 +0000 UTC m=+143.334690153 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzx7x" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.219257 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-89rxh\" (UniqueName: \"kubernetes.io/projected/5bc6fac3-953a-49e0-8743-d36684d2dfb6-kube-api-access-89rxh\") pod \"dns-default-5pbl4\" (UID: \"5bc6fac3-953a-49e0-8743-d36684d2dfb6\") " pod="openshift-dns/dns-default-5pbl4" Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.232031 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f8msc\" (UniqueName: \"kubernetes.io/projected/7ae00ecc-93bf-4b26-b6f8-3dfa67a511bf-kube-api-access-f8msc\") pod \"csi-hostpathplugin-zsl98\" (UID: \"7ae00ecc-93bf-4b26-b6f8-3dfa67a511bf\") " pod="hostpath-provisioner/csi-hostpathplugin-zsl98" Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.232561 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-s8znm" Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.244686 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dw5wp\" (UniqueName: \"kubernetes.io/projected/d4759ca8-9ff2-4e1e-a974-880caceeebb6-kube-api-access-dw5wp\") pod \"service-ca-operator-777779d784-56tdr\" (UID: \"d4759ca8-9ff2-4e1e-a974-880caceeebb6\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-56tdr" Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.266552 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hfbqg\" (UniqueName: \"kubernetes.io/projected/20a94b4e-2cd0-430e-9f2f-e805706f3b3d-kube-api-access-hfbqg\") pod \"control-plane-machine-set-operator-78cbb6b69f-nnrft\" (UID: \"20a94b4e-2cd0-430e-9f2f-e805706f3b3d\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nnrft" Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.309047 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 16 11:09:13 crc kubenswrapper[4949]: E0216 11:09:13.309773 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:13.80974544 +0000 UTC m=+143.439079605 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.327657 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mfhrk\" (UniqueName: \"kubernetes.io/projected/bd39da7f-e838-4de5-9fcb-afb858bfe8ca-kube-api-access-mfhrk\") pod \"machine-api-operator-5694c8668f-9ddvv\" (UID: \"bd39da7f-e838-4de5-9fcb-afb858bfe8ca\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-9ddvv" Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.381212 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-5xpfr" Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.384614 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-l9xzk" Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.391268 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-rrm98" Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.410901 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:13 crc kubenswrapper[4949]: E0216 11:09:13.413125 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-16 11:09:13.9131075 +0000 UTC m=+143.542441665 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzx7x" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.413498 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-zdcp8" Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.425551 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nnrft" Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.459461 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-9ddvv" Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.460395 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-56tdr" Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.486581 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-clst2" Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.499239 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-6p46s"] Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.504200 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-5pbl4" Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.513074 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 16 11:09:13 crc kubenswrapper[4949]: E0216 11:09:13.513685 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:14.013656541 +0000 UTC m=+143.642990706 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.513987 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:13 crc kubenswrapper[4949]: E0216 11:09:13.514603 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-16 11:09:14.01458046 +0000 UTC m=+143.643914625 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzx7x" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.524232 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ld5mv"] Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.530760 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-zsl98" Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.552059 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-5nxhr" event={"ID":"4c93693d-d1ae-41d1-9ed6-744dc03aa0b1","Type":"ContainerStarted","Data":"2eb510b1aa0236ed2236967ff9f2b94690546a9a05ec55289ac66f54bd7ba539"} Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.552260 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-5nxhr" event={"ID":"4c93693d-d1ae-41d1-9ed6-744dc03aa0b1","Type":"ContainerStarted","Data":"661d39613bcf5196f4833eca9d2b60c3d8b5b5ca1df700041e90611f561e432e"} Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.567009 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-tt99q"] Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.608084 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-kwq47" podStartSLOduration=121.60806507 podStartE2EDuration="2m1.60806507s" podCreationTimestamp="2026-02-16 11:07:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:09:13.57199197 +0000 UTC m=+143.201326135" watchObservedRunningTime="2026-02-16 11:09:13.60806507 +0000 UTC m=+143.237399235" Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.616985 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.617446 4949 generic.go:334] "Generic (PLEG): container finished" podID="a398cced-c30a-4638-96c2-c7fa84672dab" containerID="c8811b210c65c68b93683986a546522bb766088996d83242779dea4ace9496d4" exitCode=0 Feb 16 11:09:13 crc kubenswrapper[4949]: E0216 11:09:13.617559 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:14.117541437 +0000 UTC m=+143.746875602 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.617658 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-vm5md" event={"ID":"a398cced-c30a-4638-96c2-c7fa84672dab","Type":"ContainerDied","Data":"c8811b210c65c68b93683986a546522bb766088996d83242779dea4ace9496d4"} Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.624194 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kq7f7" event={"ID":"73ad7549-c192-4d13-b957-940364f2911b","Type":"ContainerStarted","Data":"fcd03339c1ebe18ab02a11902f3bb8d5acb19649244de1b5491a6ca5459baf92"} Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.626100 4949 generic.go:334] "Generic (PLEG): container finished" podID="df28feb8-3f45-493c-a794-bb64821b0fb0" containerID="92bb420d6fa98e7ed0c669d3e3b6dcd12e6f6a80f23c3243ef6c9eab560b59b3" exitCode=0 Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.626341 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7r2kv" event={"ID":"df28feb8-3f45-493c-a794-bb64821b0fb0","Type":"ContainerDied","Data":"92bb420d6fa98e7ed0c669d3e3b6dcd12e6f6a80f23c3243ef6c9eab560b59b3"} Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.699806 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-488fx" event={"ID":"5a74756d-ce49-4ba1-9dcb-fedee6464166","Type":"ContainerStarted","Data":"faa3f82d5744b58dfc949ee88396740ccd7d09a7bba13ce94996c23b9aada7ba"} Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.699875 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-488fx" event={"ID":"5a74756d-ce49-4ba1-9dcb-fedee6464166","Type":"ContainerStarted","Data":"fe83a5908fa638aba90c702efafd953b7a7510cace7c53a9da0bd7324efe74ba"} Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.701924 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-t47hp" event={"ID":"048dcd85-c085-40e8-b952-d97abe29ac36","Type":"ContainerStarted","Data":"7c6df036272c52dd0fb4fe3d5f9aafffba7eb293c96f563844cb6a072cb3ac88"} Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.701954 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-t47hp" event={"ID":"048dcd85-c085-40e8-b952-d97abe29ac36","Type":"ContainerStarted","Data":"3d28a563d2106ac7494acb8285b252c316690447cc7dec037e44d4ef45684c51"} Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.754714 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-n4llx" podStartSLOduration=121.754679626 podStartE2EDuration="2m1.754679626s" podCreationTimestamp="2026-02-16 11:07:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 
11:09:13.733107779 +0000 UTC m=+143.362441944" watchObservedRunningTime="2026-02-16 11:09:13.754679626 +0000 UTC m=+143.384013791" Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.759357 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:13 crc kubenswrapper[4949]: E0216 11:09:13.763094 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-16 11:09:14.263078289 +0000 UTC m=+143.892412454 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzx7x" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.785663 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p2jx5" event={"ID":"0492d421-806b-48e4-8a97-3032888e370e","Type":"ContainerStarted","Data":"57e1e949c61f9c9df9a49e72f051dbbe968f57db9ef03c6eed54935eaea6adbd"} Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.787544 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p2jx5" Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.794636 4949 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-p2jx5 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.12:8443/healthz\": dial tcp 10.217.0.12:8443: connect: connection refused" start-of-body= Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.794862 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p2jx5" podUID="0492d421-806b-48e4-8a97-3032888e370e" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.12:8443/healthz\": dial tcp 10.217.0.12:8443: connect: connection refused" Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.813714 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" event={"ID":"5e66d330-dc75-4a98-9739-589d8df61a68","Type":"ContainerStarted","Data":"0e35dcf0f645c95599314bb39f890a89d2c876f9abda5da2e0a8a519b6a9245a"} Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.814035 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.819366 4949 patch_prober.go:28] interesting pod/downloads-7954f5f757-kwq47 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": 
dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.819437 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-kwq47" podUID="89406f96-f3ec-4323-bb6a-c42175151f9d" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.841795 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-5xfq9"] Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.848382 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-mwmrl"] Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.852461 4949 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-drxpj container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.5:8443/healthz\": dial tcp 10.217.0.5:8443: connect: connection refused" start-of-body= Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.852519 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-drxpj" podUID="d2690ae1-0168-43e5-aa99-3e926f6979d8" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.5:8443/healthz\": dial tcp 10.217.0.5:8443: connect: connection refused" Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.863939 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 16 11:09:13 crc kubenswrapper[4949]: E0216 11:09:13.867105 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:14.367070838 +0000 UTC m=+143.996405003 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.928425 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-82fv4"] Feb 16 11:09:13 crc kubenswrapper[4949]: I0216 11:09:13.968750 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:13 crc kubenswrapper[4949]: E0216 11:09:13.976893 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-16 11:09:14.47686331 +0000 UTC m=+144.106197665 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzx7x" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:14 crc kubenswrapper[4949]: I0216 11:09:14.029558 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fkf4v" podStartSLOduration=122.029540171 podStartE2EDuration="2m2.029540171s" podCreationTimestamp="2026-02-16 11:07:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:09:13.993218372 +0000 UTC m=+143.622552537" watchObservedRunningTime="2026-02-16 11:09:14.029540171 +0000 UTC m=+143.658874336" Feb 16 11:09:14 crc kubenswrapper[4949]: E0216 11:09:14.071800 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:14.571731753 +0000 UTC m=+144.201065918 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:14 crc kubenswrapper[4949]: I0216 11:09:14.071319 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 16 11:09:14 crc kubenswrapper[4949]: I0216 11:09:14.072498 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:14 crc kubenswrapper[4949]: E0216 11:09:14.073039 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-16 11:09:14.573029824 +0000 UTC m=+144.202363989 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzx7x" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:14 crc kubenswrapper[4949]: I0216 11:09:14.173670 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 16 11:09:14 crc kubenswrapper[4949]: E0216 11:09:14.174692 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:14.674645229 +0000 UTC m=+144.303979394 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:14 crc kubenswrapper[4949]: I0216 11:09:14.277510 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:14 crc kubenswrapper[4949]: E0216 11:09:14.278506 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-16 11:09:14.778490554 +0000 UTC m=+144.407824719 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzx7x" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:14 crc kubenswrapper[4949]: I0216 11:09:14.374127 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-2j2dm" Feb 16 11:09:14 crc kubenswrapper[4949]: I0216 11:09:14.398889 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 16 11:09:14 crc kubenswrapper[4949]: E0216 11:09:14.400079 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:14.900052314 +0000 UTC m=+144.529386479 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:14 crc kubenswrapper[4949]: I0216 11:09:14.402184 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:14 crc kubenswrapper[4949]: E0216 11:09:14.402830 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-16 11:09:14.90280975 +0000 UTC m=+144.532143915 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzx7x" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:14 crc kubenswrapper[4949]: I0216 11:09:14.473774 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-t47hp" Feb 16 11:09:14 crc kubenswrapper[4949]: I0216 11:09:14.484508 4949 patch_prober.go:28] interesting pod/router-default-5444994796-t47hp container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 16 11:09:14 crc kubenswrapper[4949]: [-]has-synced failed: reason withheld Feb 16 11:09:14 crc kubenswrapper[4949]: [+]process-running ok Feb 16 11:09:14 crc kubenswrapper[4949]: healthz check failed Feb 16 11:09:14 crc kubenswrapper[4949]: I0216 11:09:14.484611 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t47hp" podUID="048dcd85-c085-40e8-b952-d97abe29ac36" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 16 11:09:14 crc kubenswrapper[4949]: I0216 11:09:14.510284 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 16 11:09:14 crc kubenswrapper[4949]: E0216 11:09:14.511018 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:15.010991411 +0000 UTC m=+144.640325576 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:14 crc kubenswrapper[4949]: I0216 11:09:14.552810 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-j5268"] Feb 16 11:09:14 crc kubenswrapper[4949]: I0216 11:09:14.615889 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:14 crc kubenswrapper[4949]: E0216 11:09:14.616521 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-16 11:09:15.116500288 +0000 UTC m=+144.745834453 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzx7x" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:14 crc kubenswrapper[4949]: I0216 11:09:14.624825 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cngx5"] Feb 16 11:09:14 crc kubenswrapper[4949]: I0216 11:09:14.664586 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-rk65d"] Feb 16 11:09:14 crc kubenswrapper[4949]: I0216 11:09:14.699860 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-2j2dm" podStartSLOduration=122.69982976 podStartE2EDuration="2m2.69982976s" podCreationTimestamp="2026-02-16 11:07:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:09:14.653717894 +0000 UTC m=+144.283052069" watchObservedRunningTime="2026-02-16 11:09:14.69982976 +0000 UTC m=+144.329163925" Feb 16 11:09:14 crc kubenswrapper[4949]: I0216 11:09:14.702319 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-cfspd"] Feb 16 11:09:14 crc kubenswrapper[4949]: I0216 11:09:14.722184 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 16 11:09:14 crc kubenswrapper[4949]: E0216 
11:09:14.722845 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:15.22281701 +0000 UTC m=+144.852151175 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:14 crc kubenswrapper[4949]: I0216 11:09:14.725336 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29520660-45x59"] Feb 16 11:09:14 crc kubenswrapper[4949]: I0216 11:09:14.808294 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-drxpj" podStartSLOduration=122.808260518 podStartE2EDuration="2m2.808260518s" podCreationTimestamp="2026-02-16 11:07:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:09:14.805990667 +0000 UTC m=+144.435324842" watchObservedRunningTime="2026-02-16 11:09:14.808260518 +0000 UTC m=+144.437594683" Feb 16 11:09:14 crc kubenswrapper[4949]: I0216 11:09:14.816212 4949 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-g4khj container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.6:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Feb 16 11:09:14 crc kubenswrapper[4949]: I0216 11:09:14.816269 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" podUID="5e66d330-dc75-4a98-9739-589d8df61a68" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.6:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Feb 16 11:09:14 crc kubenswrapper[4949]: I0216 11:09:14.824135 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:14 crc kubenswrapper[4949]: E0216 11:09:14.824833 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-16 11:09:15.324813197 +0000 UTC m=+144.954147362 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzx7x" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:14 crc kubenswrapper[4949]: I0216 11:09:14.848302 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-tt99q" event={"ID":"b8314a72-2893-476e-849e-33cd71b1ebd5","Type":"ContainerStarted","Data":"0b976053b690c9b92bb43c551ed0c1afa3c295c5f9ad5574a3601f4fd14bb458"} Feb 16 11:09:14 crc kubenswrapper[4949]: I0216 11:09:14.850302 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-82fv4" event={"ID":"6360e7de-ae30-4cfc-8450-a0eaff573c5c","Type":"ContainerStarted","Data":"c1154b4909994a79f0d22f5e0331224adcc4affb6a9b86bd32e5e6b2021c4608"} Feb 16 11:09:14 crc kubenswrapper[4949]: I0216 11:09:14.858114 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-j5268" event={"ID":"afc70085-b399-4f43-a311-0e38471ad055","Type":"ContainerStarted","Data":"64bf1714c4bc6e457d890f687849473247cdb00d30d9cfbb879115ad0aaa5663"} Feb 16 11:09:14 crc kubenswrapper[4949]: I0216 11:09:14.859632 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-tmlk2" podStartSLOduration=122.859619868 podStartE2EDuration="2m2.859619868s" podCreationTimestamp="2026-02-16 11:07:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:09:14.832786387 +0000 UTC m=+144.462120552" watchObservedRunningTime="2026-02-16 11:09:14.859619868 +0000 UTC m=+144.488954033" Feb 16 11:09:14 crc kubenswrapper[4949]: I0216 11:09:14.865949 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-488fx" event={"ID":"5a74756d-ce49-4ba1-9dcb-fedee6464166","Type":"ContainerStarted","Data":"fb3ea2458f6cc54309d94d99e326f2ef49378f25256cfab4b95219213be4b0f4"} Feb 16 11:09:14 crc kubenswrapper[4949]: I0216 11:09:14.882444 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-cfspd" event={"ID":"7b6f1d40-6ea7-4fd7-bb6f-c2603bf70807","Type":"ContainerStarted","Data":"0cc798dfd96c05d49244354bb7892c9acde0cec31c962eee84126d5a087feb52"} Feb 16 11:09:14 crc kubenswrapper[4949]: W0216 11:09:14.894874 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddc6af30d_b0c6_47f1_90d2_9d297a7d2b23.slice/crio-42b65871004c00b4cf69f142a929de242955c36313b0c18ce6fee72e09c440e4 WatchSource:0}: Error finding container 42b65871004c00b4cf69f142a929de242955c36313b0c18ce6fee72e09c440e4: Status 404 returned error can't find the container with id 42b65871004c00b4cf69f142a929de242955c36313b0c18ce6fee72e09c440e4 Feb 16 11:09:14 crc kubenswrapper[4949]: I0216 11:09:14.896326 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ld5mv" event={"ID":"b60d988a-5b8e-4fdd-94f4-b61faaf6b5a5","Type":"ContainerStarted","Data":"54fcf02db326d6c328d0cd4ecbbe1f03f4a51c4e34f8620b7c3cf5452794e85e"} Feb 16 11:09:14 crc kubenswrapper[4949]: I0216 11:09:14.899339 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-5xfq9" event={"ID":"468461de-4a56-47b0-a5a9-cf6e51b6de47","Type":"ContainerStarted","Data":"ff680392fb14b709b616e517bf0a18415e432b73d01a7d2c3fbf256cd1917130"} Feb 16 11:09:14 crc kubenswrapper[4949]: I0216 11:09:14.902692 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p2jx5" event={"ID":"0492d421-806b-48e4-8a97-3032888e370e","Type":"ContainerStarted","Data":"065bbd95ddd3952eb45b48100cfb7573c85243ba02a3caaa76a856b67f1720ef"} Feb 16 11:09:14 crc kubenswrapper[4949]: I0216 11:09:14.913037 4949 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-p2jx5 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.12:8443/healthz\": dial tcp 10.217.0.12:8443: connect: connection refused" start-of-body= Feb 16 11:09:14 crc kubenswrapper[4949]: I0216 11:09:14.913086 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p2jx5" podUID="0492d421-806b-48e4-8a97-3032888e370e" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.12:8443/healthz\": dial tcp 10.217.0.12:8443: connect: connection refused" Feb 16 11:09:14 crc kubenswrapper[4949]: I0216 11:09:14.924902 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 16 11:09:14 crc kubenswrapper[4949]: E0216 11:09:14.926248 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:15.426227646 +0000 UTC m=+145.055561811 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:14 crc kubenswrapper[4949]: W0216 11:09:14.951901 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod31b08225_3ae9_44c9_bcb5_7f34a7b6a969.slice/crio-cb6ad7d8b5bfb8d40231c2acb7b757cead5a91cf6b984d4ec76f54c484a0d8f7 WatchSource:0}: Error finding container cb6ad7d8b5bfb8d40231c2acb7b757cead5a91cf6b984d4ec76f54c484a0d8f7: Status 404 returned error can't find the container with id cb6ad7d8b5bfb8d40231c2acb7b757cead5a91cf6b984d4ec76f54c484a0d8f7 Feb 16 11:09:14 crc kubenswrapper[4949]: I0216 11:09:14.961471 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-s8znm" event={"ID":"d88b1cf0-2d49-4e1c-a704-ae635bf17d79","Type":"ContainerStarted","Data":"4a0de0708b6572472ae6c7944a1d2be6e1c8b18c363110264ffd1ed0f468a1c9"} Feb 16 11:09:14 crc kubenswrapper[4949]: I0216 11:09:14.983506 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kq7f7" podStartSLOduration=122.98348636 podStartE2EDuration="2m2.98348636s" podCreationTimestamp="2026-02-16 11:07:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:09:14.971792754 +0000 UTC m=+144.601126929" watchObservedRunningTime="2026-02-16 11:09:14.98348636 +0000 UTC m=+144.612820525" Feb 16 11:09:14 crc kubenswrapper[4949]: I0216 11:09:14.989615 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-mwmrl" event={"ID":"b292c120-8566-4d1a-b522-1739f12db3ab","Type":"ContainerStarted","Data":"50ae8df58e2b13ae21b3d24ad5a8b26bec784e3109329435b6a51bdaea54ed89"} Feb 16 11:09:15 crc kubenswrapper[4949]: I0216 11:09:15.001367 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-6p46s" event={"ID":"713cc598-c331-4416-9826-0418c542a29b","Type":"ContainerStarted","Data":"1838d212fbb0026686885f4505df4a3d21ca0b400b62d5f71ef9346ff26ed537"} Feb 16 11:09:15 crc kubenswrapper[4949]: I0216 11:09:15.004296 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p2jx5" podStartSLOduration=122.004273592 podStartE2EDuration="2m2.004273592s" podCreationTimestamp="2026-02-16 11:07:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:09:15.000522324 +0000 UTC m=+144.629856489" watchObservedRunningTime="2026-02-16 11:09:15.004273592 +0000 UTC m=+144.633607757" Feb 16 11:09:15 crc kubenswrapper[4949]: I0216 11:09:15.031630 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod 
\"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:15 crc kubenswrapper[4949]: E0216 11:09:15.057389 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-16 11:09:15.557354185 +0000 UTC m=+145.186688350 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzx7x" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:15 crc kubenswrapper[4949]: I0216 11:09:15.065912 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-488fx" podStartSLOduration=123.065880833 podStartE2EDuration="2m3.065880833s" podCreationTimestamp="2026-02-16 11:07:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:09:15.056949993 +0000 UTC m=+144.686284158" watchObservedRunningTime="2026-02-16 11:09:15.065880833 +0000 UTC m=+144.695214998" Feb 16 11:09:15 crc kubenswrapper[4949]: I0216 11:09:15.115249 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8cgj5"] Feb 16 11:09:15 crc kubenswrapper[4949]: I0216 11:09:15.156767 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 16 11:09:15 crc kubenswrapper[4949]: E0216 11:09:15.157623 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:15.657600198 +0000 UTC m=+145.286934363 (durationBeforeRetry 500ms). 
Feb 16 11:09:15 crc kubenswrapper[4949]: I0216 11:09:15.159627 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-22nsc"]
Feb 16 11:09:15 crc kubenswrapper[4949]: I0216 11:09:15.159745 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-g4khj"
Feb 16 11:09:15 crc kubenswrapper[4949]: I0216 11:09:15.182417 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" podStartSLOduration=123.182393575 podStartE2EDuration="2m3.182393575s" podCreationTimestamp="2026-02-16 11:07:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:09:15.124090487 +0000 UTC m=+144.753424652" watchObservedRunningTime="2026-02-16 11:09:15.182393575 +0000 UTC m=+144.811727740"
Feb 16 11:09:15 crc kubenswrapper[4949]: I0216 11:09:15.183213 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-l7qdl"]
Feb 16 11:09:15 crc kubenswrapper[4949]: I0216 11:09:15.272981 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x"
Feb 16 11:09:15 crc kubenswrapper[4949]: E0216 11:09:15.273574 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-16 11:09:15.773557972 +0000 UTC m=+145.402892127 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzx7x" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Feb 16 11:09:15 crc kubenswrapper[4949]: I0216 11:09:15.275837 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-5nxhr" podStartSLOduration=123.275801432 podStartE2EDuration="2m3.275801432s" podCreationTimestamp="2026-02-16 11:07:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:09:15.21766099 +0000 UTC m=+144.846995155" watchObservedRunningTime="2026-02-16 11:09:15.275801432 +0000 UTC m=+144.905135597"
Feb 16 11:09:15 crc kubenswrapper[4949]: I0216 11:09:15.314980 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-t47hp" podStartSLOduration=123.314938739 podStartE2EDuration="2m3.314938739s" podCreationTimestamp="2026-02-16 11:07:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:09:15.259229463 +0000 UTC m=+144.888563628" watchObservedRunningTime="2026-02-16 11:09:15.314938739 +0000 UTC m=+144.944272904"
Feb 16 11:09:15 crc kubenswrapper[4949]: I0216 11:09:15.371888 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-vwhm9"]
Feb 16 11:09:15 crc kubenswrapper[4949]: I0216 11:09:15.371962 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-5pbl4"]
Feb 16 11:09:15 crc kubenswrapper[4949]: I0216 11:09:15.371978 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-khb7r"]
Feb 16 11:09:15 crc kubenswrapper[4949]: I0216 11:09:15.372000 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-zdcp8"]
Feb 16 11:09:15 crc kubenswrapper[4949]: I0216 11:09:15.373005 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-5xpfr"]
Feb 16 11:09:15 crc kubenswrapper[4949]: I0216 11:09:15.375658 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-rrm98"]
Feb 16 11:09:15 crc kubenswrapper[4949]: I0216 11:09:15.379393 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Feb 16 11:09:15 crc kubenswrapper[4949]: E0216 11:09:15.382011 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:15.881952249 +0000 UTC m=+145.511286414 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
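Note: the pod_startup_latency_tracker records are internally consistent: podStartSLOduration is simply watchObservedRunningTime minus podCreationTimestamp (the authentication-operator record above gives 11:09:15.275801432 minus 11:07:12 = 123.275801432s, reported as "2m3.275801432s"). A small Go check of that arithmetic, with timestamps copied from that record:

```go
package main

import (
	"fmt"
	"time"
)

// Reproduces the duration arithmetic behind one tracker record:
// podStartSLOduration = watchObservedRunningTime - podCreationTimestamp.
func main() {
	const layout = "2006-01-02 15:04:05 -0700 MST"
	created, err := time.Parse(layout, "2026-02-16 11:07:12 +0000 UTC")
	if err != nil {
		panic(err)
	}
	// time.Parse accepts fractional seconds even though the layout omits them.
	watched, err := time.Parse(layout, "2026-02-16 11:09:15.275801432 +0000 UTC")
	if err != nil {
		panic(err)
	}
	fmt.Println(watched.Sub(created)) // 2m3.275801432s == podStartE2EDuration
}
```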
Feb 16 11:09:15 crc kubenswrapper[4949]: I0216 11:09:15.382479 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-zsl98"]
Feb 16 11:09:15 crc kubenswrapper[4949]: I0216 11:09:15.393889 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nnrft"]
Feb 16 11:09:15 crc kubenswrapper[4949]: I0216 11:09:15.477754 4949 patch_prober.go:28] interesting pod/router-default-5444994796-t47hp container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Feb 16 11:09:15 crc kubenswrapper[4949]: [-]has-synced failed: reason withheld
Feb 16 11:09:15 crc kubenswrapper[4949]: [+]process-running ok
Feb 16 11:09:15 crc kubenswrapper[4949]: healthz check failed
Feb 16 11:09:15 crc kubenswrapper[4949]: I0216 11:09:15.478571 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t47hp" podUID="048dcd85-c085-40e8-b952-d97abe29ac36" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Feb 16 11:09:15 crc kubenswrapper[4949]: I0216 11:09:15.489389 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x"
Feb 16 11:09:15 crc kubenswrapper[4949]: E0216 11:09:15.490257 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-16 11:09:15.990239413 +0000 UTC m=+145.619573578 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzx7x" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
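Note: the router's startup probe body above is the standard Kubernetes aggregated-healthz format: each registered check reports [+] ok or [-] failed, and any failing check turns the endpoint into an HTTP 500, which is exactly the statuscode the prober records. A minimal Go sketch of that aggregation pattern (a hypothetical helper, not the router's actual implementation):

```go
package main

import (
	"fmt"
	"net/http"
)

// healthz aggregates named checks in the spirit of the
// [-]backend-http / [-]has-synced / [+]process-running output above.
// Note: map iteration order is unspecified, so line order will vary.
func healthz(checks map[string]func() error) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		failed := false
		body := ""
		for name, check := range checks {
			if err := check(); err != nil {
				failed = true
				body += fmt.Sprintf("[-]%s failed: reason withheld\n", name)
			} else {
				body += fmt.Sprintf("[+]%s ok\n", name)
			}
		}
		if failed {
			body += "healthz check failed\n"
			w.WriteHeader(http.StatusInternalServerError) // probe sees 500
		}
		fmt.Fprint(w, body)
	}
}

func main() {
	http.HandleFunc("/healthz", healthz(map[string]func() error{
		"process-running": func() error { return nil },
		"has-synced":      func() error { return fmt.Errorf("not synced") },
	}))
	_ = http.ListenAndServe(":8080", nil)
}
```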
Feb 16 11:09:15 crc kubenswrapper[4949]: I0216 11:09:15.592535 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Feb 16 11:09:15 crc kubenswrapper[4949]: E0216 11:09:15.593133 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:16.093109088 +0000 UTC m=+145.722443253 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Feb 16 11:09:15 crc kubenswrapper[4949]: I0216 11:09:15.612782 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-l9xzk"]
Feb 16 11:09:15 crc kubenswrapper[4949]: I0216 11:09:15.612854 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-clst2"]
Feb 16 11:09:15 crc kubenswrapper[4949]: I0216 11:09:15.632235 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-9ddvv"]
Feb 16 11:09:15 crc kubenswrapper[4949]: W0216 11:09:15.649692 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda8447a79_6195_4650_95c9_4c0c4207133e.slice/crio-3664d1c112530f2a0eee400e953f2b5514aad41c1c5b6068085136fef883dfde WatchSource:0}: Error finding container 3664d1c112530f2a0eee400e953f2b5514aad41c1c5b6068085136fef883dfde: Status 404 returned error can't find the container with id 3664d1c112530f2a0eee400e953f2b5514aad41c1c5b6068085136fef883dfde
Feb 16 11:09:15 crc kubenswrapper[4949]: W0216 11:09:15.654672 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf1e26f51_c378_4a4c_ac9c_d6179acf86ca.slice/crio-222a8c845c6ee847471bb4752d0ace974dda52f477ac15587e33e90bc1c04b7a WatchSource:0}: Error finding container 222a8c845c6ee847471bb4752d0ace974dda52f477ac15587e33e90bc1c04b7a: Status 404 returned error can't find the container with id 222a8c845c6ee847471bb4752d0ace974dda52f477ac15587e33e90bc1c04b7a
Feb 16 11:09:15 crc kubenswrapper[4949]: W0216 11:09:15.686278 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9f714671_859d_44dc_ad1a_30068d61639c.slice/crio-81eb71975b1fcccf6a6ba11c6bca3426bbd212de379740779dbc6de8ee06e656 WatchSource:0}: Error finding container 81eb71975b1fcccf6a6ba11c6bca3426bbd212de379740779dbc6de8ee06e656: Status 404 returned error can't find the container with id 81eb71975b1fcccf6a6ba11c6bca3426bbd212de379740779dbc6de8ee06e656
Feb 16 11:09:15 crc kubenswrapper[4949]: I0216 11:09:15.690850 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-56tdr"]
Feb 16 11:09:15 crc kubenswrapper[4949]: I0216 11:09:15.694803 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x"
Feb 16 11:09:15 crc kubenswrapper[4949]: E0216 11:09:15.703957 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-16 11:09:16.203931591 +0000 UTC m=+145.833265756 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzx7x" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Feb 16 11:09:15 crc kubenswrapper[4949]: I0216 11:09:15.795759 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Feb 16 11:09:15 crc kubenswrapper[4949]: E0216 11:09:15.795947 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:16.295907974 +0000 UTC m=+145.925242139 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Feb 16 11:09:15 crc kubenswrapper[4949]: I0216 11:09:15.796125 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x"
Feb 16 11:09:15 crc kubenswrapper[4949]: E0216 11:09:15.796989 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-16 11:09:16.296981468 +0000 UTC m=+145.926315633 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzx7x" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Feb 16 11:09:15 crc kubenswrapper[4949]: I0216 11:09:15.900046 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Feb 16 11:09:15 crc kubenswrapper[4949]: E0216 11:09:15.900915 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:16.400893505 +0000 UTC m=+146.030227670 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Feb 16 11:09:15 crc kubenswrapper[4949]: I0216 11:09:15.938118 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q"
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.003870 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x"
Feb 16 11:09:16 crc kubenswrapper[4949]: E0216 11:09:16.005417 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-16 11:09:16.50539713 +0000 UTC m=+146.134731295 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzx7x" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.059902 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nnrft" event={"ID":"20a94b4e-2cd0-430e-9f2f-e805706f3b3d","Type":"ContainerStarted","Data":"727f04a58933eacf362cc5d9c647b623d0b56c09261ce79b6c8b634d30e8eada"}
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.093477 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cngx5" event={"ID":"31b08225-3ae9-44c9-bcb5-7f34a7b6a969","Type":"ContainerStarted","Data":"cb6ad7d8b5bfb8d40231c2acb7b757cead5a91cf6b984d4ec76f54c484a0d8f7"}
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.106986 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Feb 16 11:09:16 crc kubenswrapper[4949]: E0216 11:09:16.107367 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:16.607348676 +0000 UTC m=+146.236682841 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.114770 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-zdcp8" event={"ID":"ce26720c-05cf-4162-942f-8b94e1bcd43e","Type":"ContainerStarted","Data":"41871abd16119d8132d0d6f2e1e9cde985351db96d240cd0b659ef4c88d16b23"}
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.137124 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-9ddvv" event={"ID":"bd39da7f-e838-4de5-9fcb-afb858bfe8ca","Type":"ContainerStarted","Data":"f3fc051c730e08c8a1e091c0d5bb7a6e3b63c53476f24c98b62512a7cace09a5"}
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.155077 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-82fv4" event={"ID":"6360e7de-ae30-4cfc-8450-a0eaff573c5c","Type":"ContainerStarted","Data":"d6bc3202ddce05ad4cb4044d52532d0a58c4be309bfa7daace14a3cfa0dd37b7"}
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.155157 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-82fv4" event={"ID":"6360e7de-ae30-4cfc-8450-a0eaff573c5c","Type":"ContainerStarted","Data":"4df20626e507621265b238f4156540d5e1ca1bad4d10758c39fe413c39606244"}
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.160532 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-khb7r" event={"ID":"f50a54ec-5563-4d56-8639-86a6003e0b0e","Type":"ContainerStarted","Data":"2ae1d2b20d0df819544681e0e945094e62079304da890f2c8d4e71669b06f0ba"}
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.176812 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-l7qdl" event={"ID":"084fa2ba-3f08-4dae-9e15-2f582f6d3ca7","Type":"ContainerStarted","Data":"3268ec6848f41fabe4d223ada919367610582f93b1570e629fc6fce34671fd6a"}
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.176874 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-l7qdl" event={"ID":"084fa2ba-3f08-4dae-9e15-2f582f6d3ca7","Type":"ContainerStarted","Data":"4764303acaa403c0cd1f802c26961d8235b825b5cfbbf56e3b5aab6c0373218e"}
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.177722 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-l7qdl"
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.189524 4949 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-l7qdl container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.18:5443/healthz\": dial tcp 10.217.0.18:5443: connect: connection refused" start-of-body=
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.189594 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-l7qdl" podUID="084fa2ba-3f08-4dae-9e15-2f582f6d3ca7" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.18:5443/healthz\": dial tcp 10.217.0.18:5443: connect: connection refused"
pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-l7qdl" podUID="084fa2ba-3f08-4dae-9e15-2f582f6d3ca7" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.18:5443/healthz\": dial tcp 10.217.0.18:5443: connect: connection refused" Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.191526 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-82fv4" podStartSLOduration=123.191503893 podStartE2EDuration="2m3.191503893s" podCreationTimestamp="2026-02-16 11:07:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:09:16.188316603 +0000 UTC m=+145.817650778" watchObservedRunningTime="2026-02-16 11:09:16.191503893 +0000 UTC m=+145.820838058" Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.192503 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-s8znm" event={"ID":"d88b1cf0-2d49-4e1c-a704-ae635bf17d79","Type":"ContainerStarted","Data":"5d7aaf0759b7f949b6ba6a496072673f587cbbf8878b402e1592c417be6b9992"} Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.213422 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:16 crc kubenswrapper[4949]: E0216 11:09:16.213985 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-16 11:09:16.713959807 +0000 UTC m=+146.343293972 (durationBeforeRetry 500ms). 
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.213454 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-22nsc" event={"ID":"7aa8aba9-4384-470b-ab1b-d8df8efc889d","Type":"ContainerStarted","Data":"79b1d2b548c80abc9b8162a44ac5be21038644180dda8c05e2d4e461ae6eb63d"}
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.248079 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-vm5md" event={"ID":"a398cced-c30a-4638-96c2-c7fa84672dab","Type":"ContainerStarted","Data":"c7364f80c3310518580aae8f8cd69a862a0ae7ffb3524665e8da33ff999d857f"}
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.312900 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-cfspd" event={"ID":"7b6f1d40-6ea7-4fd7-bb6f-c2603bf70807","Type":"ContainerStarted","Data":"70cc3f1903bf3127e89cfd34d33aa13332870be11282dfeb7866f7892d36dbf8"}
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.313307 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-cfspd"
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.315437 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Feb 16 11:09:16 crc kubenswrapper[4949]: E0216 11:09:16.317340 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:16.817314976 +0000 UTC m=+146.446649141 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.324428 4949 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-cfspd container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.16:8443/healthz\": dial tcp 10.217.0.16:8443: connect: connection refused" start-of-body=
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.324518 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-cfspd" podUID="7b6f1d40-6ea7-4fd7-bb6f-c2603bf70807" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.16:8443/healthz\": dial tcp 10.217.0.16:8443: connect: connection refused"
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.329886 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-l7qdl" podStartSLOduration=123.329829329 podStartE2EDuration="2m3.329829329s" podCreationTimestamp="2026-02-16 11:07:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:09:16.271403247 +0000 UTC m=+145.900737412" watchObservedRunningTime="2026-02-16 11:09:16.329829329 +0000 UTC m=+145.959163494"
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.330038 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-s8znm" podStartSLOduration=6.330031555 podStartE2EDuration="6.330031555s" podCreationTimestamp="2026-02-16 11:09:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:09:16.304965179 +0000 UTC m=+145.934299354" watchObservedRunningTime="2026-02-16 11:09:16.330031555 +0000 UTC m=+145.959365720"
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.390319 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-vwhm9" event={"ID":"e264d3a3-9add-4493-befe-ad59a40f5e5f","Type":"ContainerStarted","Data":"cf473a11fa94c7cb09d7d89cc03eed4eca206270f5a05c4ba4e8f701d54fbc90"}
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.411449 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29520660-45x59" event={"ID":"dc6af30d-b0c6-47f1-90d2-9d297a7d2b23","Type":"ContainerStarted","Data":"ced7e0acb5315baa549cbfb4b821348a6f1b8f552adb2f7597fe780b7fe351fa"}
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.411536 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29520660-45x59" event={"ID":"dc6af30d-b0c6-47f1-90d2-9d297a7d2b23","Type":"ContainerStarted","Data":"42b65871004c00b4cf69f142a929de242955c36313b0c18ce6fee72e09c440e4"}
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.416893 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x"
started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:16 crc kubenswrapper[4949]: E0216 11:09:16.418935 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-16 11:09:16.918919961 +0000 UTC m=+146.548254126 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzx7x" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.455747 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29520660-45x59" podStartSLOduration=124.455726954 podStartE2EDuration="2m4.455726954s" podCreationTimestamp="2026-02-16 11:07:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:09:16.454633619 +0000 UTC m=+146.083967784" watchObservedRunningTime="2026-02-16 11:09:16.455726954 +0000 UTC m=+146.085061119" Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.457796 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-cfspd" podStartSLOduration=123.457787738 podStartE2EDuration="2m3.457787738s" podCreationTimestamp="2026-02-16 11:07:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:09:16.378539685 +0000 UTC m=+146.007873870" watchObservedRunningTime="2026-02-16 11:09:16.457787738 +0000 UTC m=+146.087121903" Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.463188 4949 generic.go:334] "Generic (PLEG): container finished" podID="b8314a72-2893-476e-849e-33cd71b1ebd5" containerID="22a62b16967c7686fb8ec17ad60fe8ec485d8b0da0ccd20fef0cb6475a0dde70" exitCode=0 Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.464955 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-tt99q" event={"ID":"b8314a72-2893-476e-849e-33cd71b1ebd5","Type":"ContainerDied","Data":"22a62b16967c7686fb8ec17ad60fe8ec485d8b0da0ccd20fef0cb6475a0dde70"} Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.471975 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-5pbl4" event={"ID":"5bc6fac3-953a-49e0-8743-d36684d2dfb6","Type":"ContainerStarted","Data":"f5c5ea462aea882755907b1d543ec18a5fa4fce39af10f91c3998002e92e11af"} Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.472907 4949 patch_prober.go:28] interesting pod/router-default-5444994796-t47hp container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: 
Feb 16 11:09:16 crc kubenswrapper[4949]: [-]has-synced failed: reason withheld
Feb 16 11:09:16 crc kubenswrapper[4949]: [+]process-running ok
Feb 16 11:09:16 crc kubenswrapper[4949]: healthz check failed
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.473532 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t47hp" podUID="048dcd85-c085-40e8-b952-d97abe29ac36" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.484377 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ld5mv" event={"ID":"b60d988a-5b8e-4fdd-94f4-b61faaf6b5a5","Type":"ContainerStarted","Data":"165cc7680273ff0acbf37b6341d3aca2720a935676164dde1b5f428ba08747b1"}
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.495783 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-j5268" event={"ID":"afc70085-b399-4f43-a311-0e38471ad055","Type":"ContainerStarted","Data":"2c452c0f0083bafd61ead8d584f827dd8a4bbe6628298b0fe92364682960269b"}
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.510951 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-l9xzk" event={"ID":"a8447a79-6195-4650-95c9-4c0c4207133e","Type":"ContainerStarted","Data":"3664d1c112530f2a0eee400e953f2b5514aad41c1c5b6068085136fef883dfde"}
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.526795 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Feb 16 11:09:16 crc kubenswrapper[4949]: E0216 11:09:16.528485 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:17.028461963 +0000 UTC m=+146.657796128 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.536336 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-zsl98" event={"ID":"7ae00ecc-93bf-4b26-b6f8-3dfa67a511bf","Type":"ContainerStarted","Data":"100a1773525c4b6d1a214ba7c6bb9d4eb8e5566db79c6aee96a9cba2fa4f3b1c"}
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.565082 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-rk65d" event={"ID":"30431645-dce8-4da3-a237-1ec14d3b2c73","Type":"ContainerStarted","Data":"6862ef30e6ebfe8d079052deba783a972e4194a151eb65494e282a3e8768e0ee"}
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.577635 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-j5268" podStartSLOduration=123.577616954 podStartE2EDuration="2m3.577616954s" podCreationTimestamp="2026-02-16 11:07:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:09:16.550475953 +0000 UTC m=+146.179810118" watchObservedRunningTime="2026-02-16 11:09:16.577616954 +0000 UTC m=+146.206951119"
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.578042 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ld5mv" podStartSLOduration=124.578036547 podStartE2EDuration="2m4.578036547s" podCreationTimestamp="2026-02-16 11:07:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:09:16.57556356 +0000 UTC m=+146.204897725" watchObservedRunningTime="2026-02-16 11:09:16.578036547 +0000 UTC m=+146.207370712"
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.630390 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x"
Feb 16 11:09:16 crc kubenswrapper[4949]: E0216 11:09:16.632783 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-16 11:09:17.132764353 +0000 UTC m=+146.762098518 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzx7x" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
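Note: csi-hostpathplugin-zsl98 starting (ContainerStarted above) is the event the retry loop has been waiting for: once the plugin is up it registers kubevirt.io.hostpath-provisioner over the kubelet's plugin-registration socket, the driver appears in the node's CSINode object, and the queued MountDevice/TearDown operations can finally obtain a CSI client. One way to confirm registration from outside, sketched with client-go; the node name "crc" is taken from the log hostname and the kubeconfig path is an assumption:

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Load ~/.kube/config (assumed to point at the CRC cluster).
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// The CSINode object lists every driver that has completed kubelet
	// plugin registration on that node; kubevirt.io.hostpath-provisioner
	// should appear here once csi-hostpathplugin-zsl98 is running.
	n, err := cs.StorageV1().CSINodes().Get(context.TODO(), "crc", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	for _, d := range n.Spec.Drivers {
		fmt.Println("registered:", d.Name)
	}
}
```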
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.649445 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7r2kv" event={"ID":"df28feb8-3f45-493c-a794-bb64821b0fb0","Type":"ContainerStarted","Data":"219609438fee47cd35aea5d93fffdd70ac77c3393ecd2819de0277cd0490899d"}
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.703709 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8cgj5" event={"ID":"277750ca-5f21-4e77-b882-4b79b2ca5932","Type":"ContainerStarted","Data":"e7650414c01fa0d25ca577842fc92fcadc7f8ef639aafdbff740986bb1b9588f"}
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.703773 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8cgj5" event={"ID":"277750ca-5f21-4e77-b882-4b79b2ca5932","Type":"ContainerStarted","Data":"9e7af60b427b1fdf1e3353e242a27aa4a11eb1632ca3937f89d869a7e52de317"}
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.708061 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-mwmrl" event={"ID":"b292c120-8566-4d1a-b522-1739f12db3ab","Type":"ContainerStarted","Data":"4ca119ac54741b103d5ab8258575a13c3b82d0e423439223dfc4ca085117467a"}
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.711311 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-56tdr" event={"ID":"d4759ca8-9ff2-4e1e-a974-880caceeebb6","Type":"ContainerStarted","Data":"dcbf61a603de63706d1d948ec96a0a0b8c6451d838cd580df3115c112ce331c6"}
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.741567 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-clst2" event={"ID":"f1e26f51-c378-4a4c-ac9c-d6179acf86ca","Type":"ContainerStarted","Data":"222a8c845c6ee847471bb4752d0ace974dda52f477ac15587e33e90bc1c04b7a"}
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.742346 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Feb 16 11:09:16 crc kubenswrapper[4949]: E0216 11:09:16.743740 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:17.24371821 +0000 UTC m=+146.873052385 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.821529 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7r2kv" podStartSLOduration=123.821510598 podStartE2EDuration="2m3.821510598s" podCreationTimestamp="2026-02-16 11:07:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:09:16.79314837 +0000 UTC m=+146.422482535" watchObservedRunningTime="2026-02-16 11:09:16.821510598 +0000 UTC m=+146.450844764"
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.844082 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x"
Feb 16 11:09:16 crc kubenswrapper[4949]: E0216 11:09:16.846158 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-16 11:09:17.34612088 +0000 UTC m=+146.975455045 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzx7x" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.915710 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-6p46s" event={"ID":"713cc598-c331-4416-9826-0418c542a29b","Type":"ContainerStarted","Data":"1f4764b1eab9a6f4ba48237f23768929002ebdd8c730f189d16e3a412141854a"}
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.946041 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Feb 16 11:09:16 crc kubenswrapper[4949]: E0216 11:09:16.946667 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:17.44664025 +0000 UTC m=+147.075974415 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.982047 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-mwmrl" podStartSLOduration=124.982023379 podStartE2EDuration="2m4.982023379s" podCreationTimestamp="2026-02-16 11:07:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:09:16.876297616 +0000 UTC m=+146.505631781" watchObservedRunningTime="2026-02-16 11:09:16.982023379 +0000 UTC m=+146.611357544"
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.983905 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8cgj5" podStartSLOduration=123.983898838 podStartE2EDuration="2m3.983898838s" podCreationTimestamp="2026-02-16 11:07:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:09:16.980793611 +0000 UTC m=+146.610127776" watchObservedRunningTime="2026-02-16 11:09:16.983898838 +0000 UTC m=+146.613233003"
Feb 16 11:09:16 crc kubenswrapper[4949]: I0216 11:09:16.985660 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-5xpfr" event={"ID":"c8cc1f0d-e65b-40f0-8e9b-d963c82b2bd4","Type":"ContainerStarted","Data":"4dd1a9f824f4dd1fdaa89ce0c7233ac481e89d7ed7a105ea7585b69314abc182"}
Feb 16 11:09:17 crc kubenswrapper[4949]: I0216 11:09:17.028062 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-6p46s" podStartSLOduration=124.028041642 podStartE2EDuration="2m4.028041642s" podCreationTimestamp="2026-02-16 11:07:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:09:17.019562206 +0000 UTC m=+146.648896371" watchObservedRunningTime="2026-02-16 11:09:17.028041642 +0000 UTC m=+146.657375807"
Feb 16 11:09:17 crc kubenswrapper[4949]: I0216 11:09:17.041253 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-rrm98" event={"ID":"9f714671-859d-44dc-ad1a-30068d61639c","Type":"ContainerStarted","Data":"81eb71975b1fcccf6a6ba11c6bca3426bbd212de379740779dbc6de8ee06e656"}
Feb 16 11:09:17 crc kubenswrapper[4949]: I0216 11:09:17.041851 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-rrm98"
Feb 16 11:09:17 crc kubenswrapper[4949]: I0216 11:09:17.051555 4949 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-rrm98 container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body=
Feb 16 11:09:17 crc kubenswrapper[4949]: I0216 11:09:17.051627 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-rrm98" podUID="9f714671-859d-44dc-ad1a-30068d61639c" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused"
Feb 16 11:09:17 crc kubenswrapper[4949]: I0216 11:09:17.057579 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x"
Feb 16 11:09:17 crc kubenswrapper[4949]: E0216 11:09:17.065603 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-16 11:09:17.565585729 +0000 UTC m=+147.194919894 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzx7x" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Feb 16 11:09:17 crc kubenswrapper[4949]: I0216 11:09:17.076161 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-5xfq9" event={"ID":"468461de-4a56-47b0-a5a9-cf6e51b6de47","Type":"ContainerStarted","Data":"870de372b4c2d58cdef50c5437346cee8087d575aef7f1fbec8574fb871d0f4a"}
Feb 16 11:09:17 crc kubenswrapper[4949]: I0216 11:09:17.109652 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p2jx5"
Feb 16 11:09:17 crc kubenswrapper[4949]: I0216 11:09:17.131256 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-rrm98" podStartSLOduration=124.131229786 podStartE2EDuration="2m4.131229786s" podCreationTimestamp="2026-02-16 11:07:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:09:17.129895724 +0000 UTC m=+146.759229899" watchObservedRunningTime="2026-02-16 11:09:17.131229786 +0000 UTC m=+146.760563951"
Feb 16 11:09:17 crc kubenswrapper[4949]: I0216 11:09:17.159754 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Feb 16 11:09:17 crc kubenswrapper[4949]: E0216 11:09:17.161326 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:17.661309389 +0000 UTC m=+147.290643554 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:17 crc kubenswrapper[4949]: I0216 11:09:17.263976 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:17 crc kubenswrapper[4949]: E0216 11:09:17.264620 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-16 11:09:17.764603996 +0000 UTC m=+147.393938151 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzx7x" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:17 crc kubenswrapper[4949]: I0216 11:09:17.315149 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-5xfq9" podStartSLOduration=125.3151223 podStartE2EDuration="2m5.3151223s" podCreationTimestamp="2026-02-16 11:07:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:09:17.311859018 +0000 UTC m=+146.941193183" watchObservedRunningTime="2026-02-16 11:09:17.3151223 +0000 UTC m=+146.944456465" Feb 16 11:09:17 crc kubenswrapper[4949]: I0216 11:09:17.373856 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 16 11:09:17 crc kubenswrapper[4949]: E0216 11:09:17.374558 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:17.874530802 +0000 UTC m=+147.503864967 (durationBeforeRetry 500ms). 
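
The MountVolume.MountDevice and UnmountVolume.TearDown failures repeating above and below (each with a fixed 500ms durationBeforeRetry) share one root cause: the kubelet has no registered CSI plugin named kubevirt.io.hostpath-provisioner yet, so it cannot build a CSI client either for the incoming image-registry-697d97f7c8-kzx7x pod or for the outgoing pod 8f668bae-612b-4b75-9490-919e737c6a3b. Drivers announce themselves over the kubelet's plugin-registration socket, and the result is mirrored into the node's CSINode object. A minimal client-go sketch to inspect that object (the kubeconfig path is an assumption; the node name "crc" is from this log):

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Kubeconfig path is an assumption for this sketch; point it at your own.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// The CSINode object mirrors the kubelet's view of registered CSI drivers.
	// Until kubevirt.io.hostpath-provisioner appears here, every MountDevice
	// and TearDownAt call fails with "not found in the list of registered
	// CSI drivers", as in the log records above.
	node, err := cs.StorageV1().CSINodes().Get(context.TODO(), "crc", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	for _, d := range node.Spec.Drivers {
		fmt.Println("registered CSI driver:", d.Name)
	}
}
```

The hostpath-provisioner plugin pod (csi-hostpathplugin-zsl98) only reports ContainerStarted near the end of this excerpt, which is consistent with the retry loop persisting throughout it.
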
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:17 crc kubenswrapper[4949]: I0216 11:09:17.476429 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:17 crc kubenswrapper[4949]: E0216 11:09:17.477002 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-16 11:09:17.976977993 +0000 UTC m=+147.606312158 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzx7x" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:17 crc kubenswrapper[4949]: I0216 11:09:17.488842 4949 patch_prober.go:28] interesting pod/router-default-5444994796-t47hp container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 16 11:09:17 crc kubenswrapper[4949]: [-]has-synced failed: reason withheld Feb 16 11:09:17 crc kubenswrapper[4949]: [+]process-running ok Feb 16 11:09:17 crc kubenswrapper[4949]: healthz check failed Feb 16 11:09:17 crc kubenswrapper[4949]: I0216 11:09:17.488914 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t47hp" podUID="048dcd85-c085-40e8-b952-d97abe29ac36" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 16 11:09:17 crc kubenswrapper[4949]: I0216 11:09:17.580446 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 16 11:09:17 crc kubenswrapper[4949]: E0216 11:09:17.581402 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:18.081377125 +0000 UTC m=+147.710711290 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:17 crc kubenswrapper[4949]: I0216 11:09:17.683158 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:17 crc kubenswrapper[4949]: E0216 11:09:17.683716 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-16 11:09:18.183699972 +0000 UTC m=+147.813034137 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzx7x" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:17 crc kubenswrapper[4949]: I0216 11:09:17.769640 4949 csr.go:261] certificate signing request csr-9zqmw is approved, waiting to be issued Feb 16 11:09:17 crc kubenswrapper[4949]: I0216 11:09:17.779113 4949 csr.go:257] certificate signing request csr-9zqmw is issued Feb 16 11:09:17 crc kubenswrapper[4949]: I0216 11:09:17.786972 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 16 11:09:17 crc kubenswrapper[4949]: E0216 11:09:17.787676 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:18.28765499 +0000 UTC m=+147.916989155 (durationBeforeRetry 500ms). 
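
The csr-9zqmw pair of lines above records the kubelet's serving-certificate bootstrap through the certificates.k8s.io API: "approved, waiting to be issued" means an Approved condition is present while status.certificate is still empty, and "issued" means the signer has populated it. A sketch reading the object back with client-go (kubeconfig path again an assumption):

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig") // assumption
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// "approved, waiting to be issued": an Approved condition exists but
	// status.certificate is empty. "issued": the signer has filled it in.
	csr, err := cs.CertificatesV1().CertificateSigningRequests().Get(
		context.TODO(), "csr-9zqmw", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	for _, cond := range csr.Status.Conditions {
		fmt.Println("condition:", cond.Type)
	}
	fmt.Println("issued certificate bytes:", len(csr.Status.Certificate))
}
```
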
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:17 crc kubenswrapper[4949]: I0216 11:09:17.890847 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:17 crc kubenswrapper[4949]: E0216 11:09:17.891511 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-16 11:09:18.391404842 +0000 UTC m=+148.020739007 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzx7x" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:17 crc kubenswrapper[4949]: I0216 11:09:17.992407 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 16 11:09:17 crc kubenswrapper[4949]: E0216 11:09:17.992678 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:18.492617965 +0000 UTC m=+148.121952130 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:17 crc kubenswrapper[4949]: I0216 11:09:17.992922 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:17 crc kubenswrapper[4949]: E0216 11:09:17.993282 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-16 11:09:18.493266285 +0000 UTC m=+148.122600450 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzx7x" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.094939 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 16 11:09:18 crc kubenswrapper[4949]: E0216 11:09:18.095487 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:18.595466218 +0000 UTC m=+148.224800383 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.164920 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-rrm98" event={"ID":"9f714671-859d-44dc-ad1a-30068d61639c","Type":"ContainerStarted","Data":"032f49696ef3b8e93ee9fbc87a37a53b713e09cf6fae1e726be4f2f833c7d4c0"} Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.166240 4949 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-rrm98 container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body= Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.166289 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-rrm98" podUID="9f714671-859d-44dc-ad1a-30068d61639c" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.180632 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-khb7r" event={"ID":"f50a54ec-5563-4d56-8639-86a6003e0b0e","Type":"ContainerStarted","Data":"42785c54bf371ca60d66c60914237b6ed75f54d5445b7302272c911ffd6ee7c1"} Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.181788 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-khb7r" Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.184155 4949 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-khb7r container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.36:8080/healthz\": dial tcp 10.217.0.36:8080: connect: connection refused" start-of-body= Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.184400 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-khb7r" podUID="f50a54ec-5563-4d56-8639-86a6003e0b0e" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.36:8080/healthz\": dial tcp 10.217.0.36:8080: connect: connection refused" Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.198397 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:18 crc kubenswrapper[4949]: E0216 11:09:18.198847 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
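
"connect: connection refused" in the readiness-probe failures above is a TCP-level rejection: PLEG has just reported ContainerStarted, but the process inside is not yet listening on its probe port (8443 for olm-operator, 8080 for marketplace-operator). It is a different failure mode from the packageserver probe later in this log, where the connection merely times out (Client.Timeout exceeded while awaiting headers). Roughly what the kubelet's HTTP prober does, as a self-contained sketch (the timeout and TLS settings are assumptions, not the kubelet's exact configuration):

```go
package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"time"
)

func main() {
	// Approximates the olm-operator readiness check from the log above.
	client := &http.Client{
		Timeout: 1 * time.Second,
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	resp, err := client.Get("https://10.217.0.20:8443/healthz")
	if err != nil {
		// Prints "connection refused" while nothing listens on the port yet,
		// or a Client.Timeout error if the peer accepts but never answers.
		fmt.Println("probe failure:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("probe status:", resp.StatusCode)
}
```
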
No retries permitted until 2026-02-16 11:09:18.698832658 +0000 UTC m=+148.328166823 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzx7x" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.199434 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-tt99q" event={"ID":"b8314a72-2893-476e-849e-33cd71b1ebd5","Type":"ContainerStarted","Data":"e18ef8bf498cd236c841ddd17c9b3e247616a8f58056af118495d2a7b7e41ea4"} Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.199714 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-tt99q" Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.208129 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nnrft" event={"ID":"20a94b4e-2cd0-430e-9f2f-e805706f3b3d","Type":"ContainerStarted","Data":"f155a7012d1881f1cb6e37be735a5158f57ebcc0f9a76d95141828871fce8cf6"} Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.236638 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-khb7r" podStartSLOduration=125.236615592 podStartE2EDuration="2m5.236615592s" podCreationTimestamp="2026-02-16 11:07:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:09:18.2346458 +0000 UTC m=+147.863979965" watchObservedRunningTime="2026-02-16 11:09:18.236615592 +0000 UTC m=+147.865949757" Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.237667 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-5pbl4" event={"ID":"5bc6fac3-953a-49e0-8743-d36684d2dfb6","Type":"ContainerStarted","Data":"0a63cff7cd89c56f6794f735543485e43fbb1a43bc74304b3d5f5c3492a4f104"} Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.237735 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-5pbl4" event={"ID":"5bc6fac3-953a-49e0-8743-d36684d2dfb6","Type":"ContainerStarted","Data":"76fdd114ee503527bf9d5ec09811405a78d867764acfe20f091eab3ba2d9f0c7"} Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.238774 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-5pbl4" Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.247623 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-56tdr" event={"ID":"d4759ca8-9ff2-4e1e-a974-880caceeebb6","Type":"ContainerStarted","Data":"e708b13019727b0d6c64b724a2736e06b23677cf7b7de4927512887b72c54046"} Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.273613 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-clst2" event={"ID":"f1e26f51-c378-4a4c-ac9c-d6179acf86ca","Type":"ContainerStarted","Data":"dde201ab7573b138c3709b2232e9cf26ff2a4c9423f88ca56830e65120a042c4"} Feb 16 
11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.273684 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-clst2" event={"ID":"f1e26f51-c378-4a4c-ac9c-d6179acf86ca","Type":"ContainerStarted","Data":"7f71eae21b471a466c642383ecde14ea87d959c62df43bac4f62a549bfef76c2"} Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.275329 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nnrft" podStartSLOduration=125.275305345 podStartE2EDuration="2m5.275305345s" podCreationTimestamp="2026-02-16 11:07:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:09:18.272459316 +0000 UTC m=+147.901793501" watchObservedRunningTime="2026-02-16 11:09:18.275305345 +0000 UTC m=+147.904639510" Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.293198 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-rk65d" event={"ID":"30431645-dce8-4da3-a237-1ec14d3b2c73","Type":"ContainerStarted","Data":"60949f99dad1212e856afaf85860bd64b25fd3f7fa049e0f67a2a50a24151b64"} Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.293264 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-rk65d" event={"ID":"30431645-dce8-4da3-a237-1ec14d3b2c73","Type":"ContainerStarted","Data":"d5815cc994d191b555913600bf3c25675bb7d8846ebbc5daef6781fcb6ce0ba5"} Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.303087 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 16 11:09:18 crc kubenswrapper[4949]: E0216 11:09:18.303629 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:18.80356078 +0000 UTC m=+148.432895085 (durationBeforeRetry 500ms). 
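
Each "Observed pod startup duration" record above is simple arithmetic: podStartSLOduration is watchObservedRunningTime minus podCreationTimestamp, and with no image pull (firstStartedPulling and lastFinishedPulling at the zero time 0001-01-01) podStartE2EDuration comes out identical. The "m=+148.05..." suffixes are the monotonic-clock reading, in seconds since process start, that Go's time.Time String method appends. A worked check with the marketplace-operator numbers from the log:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// marketplace-operator-79b997595-khb7r, values from the record above:
	// podStartSLOduration=125.236615592 is watchObservedRunningTime minus
	// podCreationTimestamp.
	created, _ := time.Parse(time.RFC3339, "2026-02-16T11:07:13Z")
	observed, _ := time.Parse(time.RFC3339Nano, "2026-02-16T11:09:18.236615592Z")
	fmt.Println(observed.Sub(created).Seconds()) // 125.236615592
}
```
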
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.303766 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-6p46s" event={"ID":"713cc598-c331-4416-9826-0418c542a29b","Type":"ContainerStarted","Data":"d7d2c2119d1b387c13be4b4fe34dfb27ab41e7ba3aad92594a5eddd0fcca9517"} Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.303884 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:18 crc kubenswrapper[4949]: E0216 11:09:18.306111 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-16 11:09:18.80609435 +0000 UTC m=+148.435428725 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzx7x" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.318916 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-tt99q" podStartSLOduration=126.318880791 podStartE2EDuration="2m6.318880791s" podCreationTimestamp="2026-02-16 11:07:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:09:18.314878965 +0000 UTC m=+147.944213130" watchObservedRunningTime="2026-02-16 11:09:18.318880791 +0000 UTC m=+147.948214956" Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.333604 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cngx5" event={"ID":"31b08225-3ae9-44c9-bcb5-7f34a7b6a969","Type":"ContainerStarted","Data":"11b516852c25b728041c40a2bd1198ea20b00bfb28b947594138cba4e5d8a253"} Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.362919 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-vm5md" event={"ID":"a398cced-c30a-4638-96c2-c7fa84672dab","Type":"ContainerStarted","Data":"64809ae5a313b5e90b387b51d32d37349cb0eed6522741a4186c6303d3a1289c"} Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.367287 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-rk65d" 
podStartSLOduration=126.367273427 podStartE2EDuration="2m6.367273427s" podCreationTimestamp="2026-02-16 11:07:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:09:18.357554103 +0000 UTC m=+147.986888268" watchObservedRunningTime="2026-02-16 11:09:18.367273427 +0000 UTC m=+147.996607592" Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.397212 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-zdcp8" event={"ID":"ce26720c-05cf-4162-942f-8b94e1bcd43e","Type":"ContainerStarted","Data":"c0eb1698018e2cac1ce02e657a3773594bef73039fc3ec7da3f9caf96a651bcc"} Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.408898 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 16 11:09:18 crc kubenswrapper[4949]: E0216 11:09:18.409532 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:18.90948138 +0000 UTC m=+148.538815545 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.421858 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-clst2" podStartSLOduration=125.421828237 podStartE2EDuration="2m5.421828237s" podCreationTimestamp="2026-02-16 11:07:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:09:18.420075972 +0000 UTC m=+148.049410127" watchObservedRunningTime="2026-02-16 11:09:18.421828237 +0000 UTC m=+148.051162422" Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.429700 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-9ddvv" event={"ID":"bd39da7f-e838-4de5-9fcb-afb858bfe8ca","Type":"ContainerStarted","Data":"01136031dd046cb0d24f96c118e6372f0cc81e80d565cc82debeea1a2c4dc822"} Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.429784 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-9ddvv" event={"ID":"bd39da7f-e838-4de5-9fcb-afb858bfe8ca","Type":"ContainerStarted","Data":"7891343c2015800c37c88cb7ff601851572c5d2acaba9b7d64fa474bf4a55a8c"} Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.461559 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-l9xzk" 
event={"ID":"a8447a79-6195-4650-95c9-4c0c4207133e","Type":"ContainerStarted","Data":"22787e3c182d8616659b2e9958d2bbff0c96f116dadfd8e328c620169a8fc204"} Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.461601 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-l9xzk" event={"ID":"a8447a79-6195-4650-95c9-4c0c4207133e","Type":"ContainerStarted","Data":"a1234921fc1eeb8f86203efcdf6f16c98ed9204029cb7b4a4ee05d427f6e3665"} Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.462354 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-l9xzk" Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.471943 4949 patch_prober.go:28] interesting pod/router-default-5444994796-t47hp container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 16 11:09:18 crc kubenswrapper[4949]: [-]has-synced failed: reason withheld Feb 16 11:09:18 crc kubenswrapper[4949]: [+]process-running ok Feb 16 11:09:18 crc kubenswrapper[4949]: healthz check failed Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.472033 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t47hp" podUID="048dcd85-c085-40e8-b952-d97abe29ac36" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.489111 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-5pbl4" podStartSLOduration=8.489078355 podStartE2EDuration="8.489078355s" podCreationTimestamp="2026-02-16 11:09:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:09:18.486531715 +0000 UTC m=+148.115865880" watchObservedRunningTime="2026-02-16 11:09:18.489078355 +0000 UTC m=+148.118412530" Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.494657 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-22nsc" event={"ID":"7aa8aba9-4384-470b-ab1b-d8df8efc889d","Type":"ContainerStarted","Data":"11864827cb9fae1bbed06a46a37f8cbe80e590e85c064a98d3a4977e5605ffd9"} Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.515151 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:18 crc kubenswrapper[4949]: E0216 11:09:18.517491 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-16 11:09:19.017474595 +0000 UTC m=+148.646808760 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzx7x" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.552716 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-5xpfr" event={"ID":"c8cc1f0d-e65b-40f0-8e9b-d963c82b2bd4","Type":"ContainerStarted","Data":"d4ce670f8a676b60d9a3ada3df4d062940bc93639c7e648af81811bc795605c8"} Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.552780 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-5xpfr" event={"ID":"c8cc1f0d-e65b-40f0-8e9b-d963c82b2bd4","Type":"ContainerStarted","Data":"d05958fa7745bb29bd3bbe9ae87de89f328b7509cb395c56a5b7484e3aa4e66e"} Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.580930 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-vwhm9" event={"ID":"e264d3a3-9add-4493-befe-ad59a40f5e5f","Type":"ContainerStarted","Data":"c5fa93340cf0df8f06ce4df0a9c495a0db92553a39a4791e6ed46d4dc20fde17"} Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.619742 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.620408 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-cfspd" Feb 16 11:09:18 crc kubenswrapper[4949]: E0216 11:09:18.621094 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:19.121074282 +0000 UTC m=+148.750408447 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.637443 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-56tdr" podStartSLOduration=125.637417055 podStartE2EDuration="2m5.637417055s" podCreationTimestamp="2026-02-16 11:07:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:09:18.552142752 +0000 UTC m=+148.181476907" watchObservedRunningTime="2026-02-16 11:09:18.637417055 +0000 UTC m=+148.266751220" Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.685799 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-vm5md" podStartSLOduration=126.6857666 podStartE2EDuration="2m6.6857666s" podCreationTimestamp="2026-02-16 11:07:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:09:18.622882439 +0000 UTC m=+148.252216604" watchObservedRunningTime="2026-02-16 11:09:18.6857666 +0000 UTC m=+148.315100765" Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.688028 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-22nsc" podStartSLOduration=9.688015601 podStartE2EDuration="9.688015601s" podCreationTimestamp="2026-02-16 11:09:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:09:18.680578967 +0000 UTC m=+148.309913132" watchObservedRunningTime="2026-02-16 11:09:18.688015601 +0000 UTC m=+148.317349766" Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.724526 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:18 crc kubenswrapper[4949]: E0216 11:09:18.730538 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-16 11:09:19.230517963 +0000 UTC m=+148.859852128 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzx7x" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.782505 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2027-02-16 11:04:17 +0000 UTC, rotation deadline is 2026-11-16 07:53:22.757465228 +0000 UTC Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.782573 4949 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 6548h44m3.974896474s for next certificate rotation Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.828931 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 16 11:09:18 crc kubenswrapper[4949]: E0216 11:09:18.829319 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:19.329302159 +0000 UTC m=+148.958636324 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.888782 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cngx5" podStartSLOduration=126.888754412 podStartE2EDuration="2m6.888754412s" podCreationTimestamp="2026-02-16 11:07:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:09:18.885772579 +0000 UTC m=+148.515106754" watchObservedRunningTime="2026-02-16 11:09:18.888754412 +0000 UTC m=+148.518088577" Feb 16 11:09:18 crc kubenswrapper[4949]: I0216 11:09:18.931034 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:18 crc kubenswrapper[4949]: E0216 11:09:18.931503 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-16 11:09:19.431487892 +0000 UTC m=+149.060822057 (durationBeforeRetry 500ms). 
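
The certificate_manager lines above schedule the next rotation of the kubelet-serving certificate: rather than waiting for expiry (2027-02-16), the manager picks a jittered deadline late in the certificate's validity window (here 2026-11-16, roughly 75% of the one-year lifetime, consistent with client-go's jittered rotation deadline) and sleeps until then. The logged wait is simply the gap between the deadline and the moment of logging, which the numbers confirm:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Subtracting the logged wait from the rotation deadline recovers the
	// timestamp of the log record itself.
	wait, _ := time.ParseDuration("6548h44m3.974896474s")
	deadline, _ := time.Parse(time.RFC3339Nano, "2026-11-16T07:53:22.757465228Z")
	fmt.Println(deadline.Add(-wait)) // 2026-02-16 11:09:18.782568754 +0000 UTC
}
```
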
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzx7x" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:19 crc kubenswrapper[4949]: I0216 11:09:19.032241 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 16 11:09:19 crc kubenswrapper[4949]: E0216 11:09:19.032784 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:19.532763216 +0000 UTC m=+149.162097371 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:19 crc kubenswrapper[4949]: I0216 11:09:19.047110 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-zdcp8" podStartSLOduration=126.047069104 podStartE2EDuration="2m6.047069104s" podCreationTimestamp="2026-02-16 11:07:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:09:19.036533894 +0000 UTC m=+148.665868059" watchObservedRunningTime="2026-02-16 11:09:19.047069104 +0000 UTC m=+148.676403269" Feb 16 11:09:19 crc kubenswrapper[4949]: I0216 11:09:19.141294 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:19 crc kubenswrapper[4949]: E0216 11:09:19.141709 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-16 11:09:19.6416948 +0000 UTC m=+149.271028965 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzx7x" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:19 crc kubenswrapper[4949]: I0216 11:09:19.242974 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 16 11:09:19 crc kubenswrapper[4949]: E0216 11:09:19.243628 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:19.743601314 +0000 UTC m=+149.372935479 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:19 crc kubenswrapper[4949]: I0216 11:09:19.342470 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-l9xzk" podStartSLOduration=126.342439432 podStartE2EDuration="2m6.342439432s" podCreationTimestamp="2026-02-16 11:07:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:09:19.137913672 +0000 UTC m=+148.767247847" watchObservedRunningTime="2026-02-16 11:09:19.342439432 +0000 UTC m=+148.971773597" Feb 16 11:09:19 crc kubenswrapper[4949]: I0216 11:09:19.342655 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-9ddvv" podStartSLOduration=126.342652089 podStartE2EDuration="2m6.342652089s" podCreationTimestamp="2026-02-16 11:07:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:09:19.341695519 +0000 UTC m=+148.971029694" watchObservedRunningTime="2026-02-16 11:09:19.342652089 +0000 UTC m=+148.971986254" Feb 16 11:09:19 crc kubenswrapper[4949]: I0216 11:09:19.345741 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:19 crc kubenswrapper[4949]: E0216 11:09:19.346343 4949 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-16 11:09:19.846314724 +0000 UTC m=+149.475649079 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzx7x" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:19 crc kubenswrapper[4949]: I0216 11:09:19.447195 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 16 11:09:19 crc kubenswrapper[4949]: E0216 11:09:19.447347 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:19.94732093 +0000 UTC m=+149.576655085 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:19 crc kubenswrapper[4949]: I0216 11:09:19.447663 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:19 crc kubenswrapper[4949]: E0216 11:09:19.448146 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-16 11:09:19.948137125 +0000 UTC m=+149.577471290 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzx7x" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:19 crc kubenswrapper[4949]: I0216 11:09:19.469455 4949 patch_prober.go:28] interesting pod/router-default-5444994796-t47hp container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 16 11:09:19 crc kubenswrapper[4949]: [-]has-synced failed: reason withheld Feb 16 11:09:19 crc kubenswrapper[4949]: [+]process-running ok Feb 16 11:09:19 crc kubenswrapper[4949]: healthz check failed Feb 16 11:09:19 crc kubenswrapper[4949]: I0216 11:09:19.469549 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t47hp" podUID="048dcd85-c085-40e8-b952-d97abe29ac36" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 16 11:09:19 crc kubenswrapper[4949]: I0216 11:09:19.548398 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-vwhm9" podStartSLOduration=126.548364977 podStartE2EDuration="2m6.548364977s" podCreationTimestamp="2026-02-16 11:07:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:09:19.471665283 +0000 UTC m=+149.100999448" watchObservedRunningTime="2026-02-16 11:09:19.548364977 +0000 UTC m=+149.177699142" Feb 16 11:09:19 crc kubenswrapper[4949]: I0216 11:09:19.549609 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 16 11:09:19 crc kubenswrapper[4949]: E0216 11:09:19.549876 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:20.049835683 +0000 UTC m=+149.679169848 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Feb 16 11:09:19 crc kubenswrapper[4949]: I0216 11:09:19.550053 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x"
Feb 16 11:09:19 crc kubenswrapper[4949]: E0216 11:09:19.550790 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-16 11:09:20.050780422 +0000 UTC m=+149.680114587 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzx7x" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Feb 16 11:09:19 crc kubenswrapper[4949]: I0216 11:09:19.581350 4949 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-l7qdl container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.18:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Feb 16 11:09:19 crc kubenswrapper[4949]: I0216 11:09:19.581453 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-l7qdl" podUID="084fa2ba-3f08-4dae-9e15-2f582f6d3ca7" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.18:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Feb 16 11:09:19 crc kubenswrapper[4949]: I0216 11:09:19.610116 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-zdcp8" event={"ID":"ce26720c-05cf-4162-942f-8b94e1bcd43e","Type":"ContainerStarted","Data":"6c09475ef24f391f110b660b359f29cc8a9e6fc44e613b2fa702d22538b0a67a"}
Feb 16 11:09:19 crc kubenswrapper[4949]: I0216 11:09:19.619051 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-zsl98" event={"ID":"7ae00ecc-93bf-4b26-b6f8-3dfa67a511bf","Type":"ContainerStarted","Data":"a3e662e2c9515d4b67a771779f7918fa70146bb9303f66df08f474a34c55c33a"}
Feb 16 11:09:19 crc kubenswrapper[4949]: I0216 11:09:19.620028 4949 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-khb7r container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.36:8080/healthz\": dial tcp 10.217.0.36:8080: connect: connection refused" start-of-body=
Feb 16 11:09:19 crc kubenswrapper[4949]: I0216 11:09:19.620103 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-khb7r" podUID="f50a54ec-5563-4d56-8639-86a6003e0b0e" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.36:8080/healthz\": dial tcp 10.217.0.36:8080: connect: connection refused"
Feb 16 11:09:19 crc kubenswrapper[4949]: I0216 11:09:19.636700 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-rrm98"
Feb 16 11:09:19 crc kubenswrapper[4949]: I0216 11:09:19.652071 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Feb 16 11:09:19 crc kubenswrapper[4949]: E0216 11:09:19.652725 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:20.152699007 +0000 UTC m=+149.782033182 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Feb 16 11:09:19 crc kubenswrapper[4949]: I0216 11:09:19.672748 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-5xpfr" podStartSLOduration=126.672721375 podStartE2EDuration="2m6.672721375s" podCreationTimestamp="2026-02-16 11:07:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:09:19.555537451 +0000 UTC m=+149.184871626" watchObservedRunningTime="2026-02-16 11:09:19.672721375 +0000 UTC m=+149.302055540"
Feb 16 11:09:19 crc kubenswrapper[4949]: I0216 11:09:19.755559 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x"
Feb 16 11:09:19 crc kubenswrapper[4949]: E0216 11:09:19.756163 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-16 11:09:20.256137079 +0000 UTC m=+149.885471244 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzx7x" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Feb 16 11:09:19 crc kubenswrapper[4949]: I0216 11:09:19.776639 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-fcdq5"]
Feb 16 11:09:19 crc kubenswrapper[4949]: I0216 11:09:19.778376 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fcdq5"
Feb 16 11:09:19 crc kubenswrapper[4949]: I0216 11:09:19.782123 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Feb 16 11:09:19 crc kubenswrapper[4949]: I0216 11:09:19.792021 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-fcdq5"]
Feb 16 11:09:19 crc kubenswrapper[4949]: I0216 11:09:19.860938 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Feb 16 11:09:19 crc kubenswrapper[4949]: I0216 11:09:19.861399 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jtvnm\" (UniqueName: \"kubernetes.io/projected/37cad064-c760-43e0-8a5c-fb66fc774246-kube-api-access-jtvnm\") pod \"community-operators-fcdq5\" (UID: \"37cad064-c760-43e0-8a5c-fb66fc774246\") " pod="openshift-marketplace/community-operators-fcdq5"
Feb 16 11:09:19 crc kubenswrapper[4949]: I0216 11:09:19.861462 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/37cad064-c760-43e0-8a5c-fb66fc774246-catalog-content\") pod \"community-operators-fcdq5\" (UID: \"37cad064-c760-43e0-8a5c-fb66fc774246\") " pod="openshift-marketplace/community-operators-fcdq5"
Feb 16 11:09:19 crc kubenswrapper[4949]: I0216 11:09:19.861553 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/37cad064-c760-43e0-8a5c-fb66fc774246-utilities\") pod \"community-operators-fcdq5\" (UID: \"37cad064-c760-43e0-8a5c-fb66fc774246\") " pod="openshift-marketplace/community-operators-fcdq5"
Feb 16 11:09:19 crc kubenswrapper[4949]: E0216 11:09:19.861759 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:20.361735329 +0000 UTC m=+149.991069484 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
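Every UnmountVolume.TearDown and MountVolume.MountDevice failure above reduces to the same condition: the kubelet has no registered CSI plugin named kubevirt.io.hostpath-provisioner yet, because the csi-hostpathplugin pod that serves it is itself still starting (see the ContainerStarted events). One way to see which drivers a node has actually registered is to read its CSINode object, which mirrors the kubelet's registration list. The sketch below is an illustrative diagnostic, not kubelet code; the KUBECONFIG lookup and the node name "crc" are assumptions taken from this environment.

```go
// check-csinode.go: list the CSI drivers that have completed node-level
// registration, i.e. the list the "not found in the list of registered
// CSI drivers" error is consulting.
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumed: a kubeconfig is reachable via the KUBECONFIG environment variable.
	cfg, err := clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG"))
	if err != nil {
		log.Fatal(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}
	// The CSINode object for a node reflects the kubelet's per-node driver registrations.
	csiNode, err := cs.StorageV1().CSINodes().Get(context.TODO(), "crc", metav1.GetOptions{})
	if err != nil {
		log.Fatal(err)
	}
	for _, d := range csiNode.Spec.Drivers {
		// kubevirt.io.hostpath-provisioner should appear here once registration completes.
		fmt.Println("registered:", d.Name)
	}
}
```

Once the hostpath plugin's registration socket is processed (the plugin_watcher event later in this log), the driver appears in that list and the pending mount for image-registry-697d97f7c8-kzx7x can finally proceed.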
Feb 16 11:09:19 crc kubenswrapper[4949]: I0216 11:09:19.916149 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-l7qdl"
Feb 16 11:09:19 crc kubenswrapper[4949]: I0216 11:09:19.956829 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-w4598"]
Feb 16 11:09:19 crc kubenswrapper[4949]: I0216 11:09:19.958073 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-w4598"
Feb 16 11:09:19 crc kubenswrapper[4949]: I0216 11:09:19.961447 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Feb 16 11:09:19 crc kubenswrapper[4949]: I0216 11:09:19.962932 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jtvnm\" (UniqueName: \"kubernetes.io/projected/37cad064-c760-43e0-8a5c-fb66fc774246-kube-api-access-jtvnm\") pod \"community-operators-fcdq5\" (UID: \"37cad064-c760-43e0-8a5c-fb66fc774246\") " pod="openshift-marketplace/community-operators-fcdq5"
Feb 16 11:09:19 crc kubenswrapper[4949]: I0216 11:09:19.963039 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/37cad064-c760-43e0-8a5c-fb66fc774246-catalog-content\") pod \"community-operators-fcdq5\" (UID: \"37cad064-c760-43e0-8a5c-fb66fc774246\") " pod="openshift-marketplace/community-operators-fcdq5"
Feb 16 11:09:19 crc kubenswrapper[4949]: I0216 11:09:19.963133 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x"
Feb 16 11:09:19 crc kubenswrapper[4949]: I0216 11:09:19.963198 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/37cad064-c760-43e0-8a5c-fb66fc774246-utilities\") pod \"community-operators-fcdq5\" (UID: \"37cad064-c760-43e0-8a5c-fb66fc774246\") " pod="openshift-marketplace/community-operators-fcdq5"
Feb 16 11:09:19 crc kubenswrapper[4949]: I0216 11:09:19.963648 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/37cad064-c760-43e0-8a5c-fb66fc774246-catalog-content\") pod \"community-operators-fcdq5\" (UID: \"37cad064-c760-43e0-8a5c-fb66fc774246\") " pod="openshift-marketplace/community-operators-fcdq5"
Feb 16 11:09:19 crc kubenswrapper[4949]: I0216 11:09:19.963856 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/37cad064-c760-43e0-8a5c-fb66fc774246-utilities\") pod \"community-operators-fcdq5\" (UID: \"37cad064-c760-43e0-8a5c-fb66fc774246\") " pod="openshift-marketplace/community-operators-fcdq5"
Feb 16 11:09:19 crc kubenswrapper[4949]: E0216 11:09:19.964128 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-16 11:09:20.464086617 +0000 UTC m=+150.093420962 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzx7x" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Feb 16 11:09:19 crc kubenswrapper[4949]: I0216 11:09:19.981941 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-w4598"]
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.025983 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jtvnm\" (UniqueName: \"kubernetes.io/projected/37cad064-c760-43e0-8a5c-fb66fc774246-kube-api-access-jtvnm\") pod \"community-operators-fcdq5\" (UID: \"37cad064-c760-43e0-8a5c-fb66fc774246\") " pod="openshift-marketplace/community-operators-fcdq5"
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.065265 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Feb 16 11:09:20 crc kubenswrapper[4949]: E0216 11:09:20.065406 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:20.565379461 +0000 UTC m=+150.194713626 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.065737 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/73a35f90-b2ec-4518-b927-844e164a8531-utilities\") pod \"certified-operators-w4598\" (UID: \"73a35f90-b2ec-4518-b927-844e164a8531\") " pod="openshift-marketplace/certified-operators-w4598"
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.065776 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x"
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.065814 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5mdzf\" (UniqueName: \"kubernetes.io/projected/73a35f90-b2ec-4518-b927-844e164a8531-kube-api-access-5mdzf\") pod \"certified-operators-w4598\" (UID: \"73a35f90-b2ec-4518-b927-844e164a8531\") " pod="openshift-marketplace/certified-operators-w4598"
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.065888 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/73a35f90-b2ec-4518-b927-844e164a8531-catalog-content\") pod \"certified-operators-w4598\" (UID: \"73a35f90-b2ec-4518-b927-844e164a8531\") " pod="openshift-marketplace/certified-operators-w4598"
Feb 16 11:09:20 crc kubenswrapper[4949]: E0216 11:09:20.066360 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-16 11:09:20.566352201 +0000 UTC m=+150.195686366 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzx7x" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.109653 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fcdq5"
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.168080 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.168281 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.168319 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.168349 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/73a35f90-b2ec-4518-b927-844e164a8531-catalog-content\") pod \"certified-operators-w4598\" (UID: \"73a35f90-b2ec-4518-b927-844e164a8531\") " pod="openshift-marketplace/certified-operators-w4598"
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.168389 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.168410 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.168433 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/73a35f90-b2ec-4518-b927-844e164a8531-utilities\") pod \"certified-operators-w4598\" (UID: \"73a35f90-b2ec-4518-b927-844e164a8531\") " pod="openshift-marketplace/certified-operators-w4598"
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.168457 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5mdzf\" (UniqueName: \"kubernetes.io/projected/73a35f90-b2ec-4518-b927-844e164a8531-kube-api-access-5mdzf\") pod \"certified-operators-w4598\" (UID: \"73a35f90-b2ec-4518-b927-844e164a8531\") " pod="openshift-marketplace/certified-operators-w4598"
Feb 16 11:09:20 crc kubenswrapper[4949]: E0216 11:09:20.168810 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:20.668797012 +0000 UTC m=+150.298131177 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.172267 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/73a35f90-b2ec-4518-b927-844e164a8531-utilities\") pod \"certified-operators-w4598\" (UID: \"73a35f90-b2ec-4518-b927-844e164a8531\") " pod="openshift-marketplace/certified-operators-w4598"
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.172608 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/73a35f90-b2ec-4518-b927-844e164a8531-catalog-content\") pod \"certified-operators-w4598\" (UID: \"73a35f90-b2ec-4518-b927-844e164a8531\") " pod="openshift-marketplace/certified-operators-w4598"
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.174239 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.182273 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-wnqwb"]
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.183946 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wnqwb"
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.191289 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.191762 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.194785 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.205610 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wnqwb"]
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.223187 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5mdzf\" (UniqueName: \"kubernetes.io/projected/73a35f90-b2ec-4518-b927-844e164a8531-kube-api-access-5mdzf\") pod \"certified-operators-w4598\" (UID: \"73a35f90-b2ec-4518-b927-844e164a8531\") " pod="openshift-marketplace/certified-operators-w4598"
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.272554 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.274385 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8668558-9b23-4195-9816-7f9034a699e8-utilities\") pod \"community-operators-wnqwb\" (UID: \"d8668558-9b23-4195-9816-7f9034a699e8\") " pod="openshift-marketplace/community-operators-wnqwb"
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.274439 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8668558-9b23-4195-9816-7f9034a699e8-catalog-content\") pod \"community-operators-wnqwb\" (UID: \"d8668558-9b23-4195-9816-7f9034a699e8\") " pod="openshift-marketplace/community-operators-wnqwb"
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.274466 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5f5w2\" (UniqueName: \"kubernetes.io/projected/d8668558-9b23-4195-9816-7f9034a699e8-kube-api-access-5f5w2\") pod \"community-operators-wnqwb\" (UID: \"d8668558-9b23-4195-9816-7f9034a699e8\") " pod="openshift-marketplace/community-operators-wnqwb"
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.274496 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x"
Feb 16 11:09:20 crc kubenswrapper[4949]: E0216 11:09:20.274857 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-16 11:09:20.774840856 +0000 UTC m=+150.404175021 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzx7x" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.275134 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-w4598"
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.291371 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.291440 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
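The interleaved reconciler_common.go records above are the kubelet's volume manager reconciling, volume by volume, a desired state of the world (what the scheduled pods need mounted) against an actual state (what is currently mounted). That is why the emptyDir, projected, configmap and secret volumes for the catalog and console pods succeed immediately while the single CSI volume keeps failing and being requeued. Below is a minimal conceptual sketch of that diff-and-act loop, with hypothetical volume names; it is an illustration of the pattern, not kubelet source.

```go
// reconcile-sketch.go: diff a desired set of mounts against an actual set and
// report the operations a reconciler would start, mirroring the
// "MountVolume started" / "UnmountVolume started" chatter in the log.
package main

import "fmt"

func reconcile(desired, actual map[string]bool) {
	for v := range desired {
		if !actual[v] {
			fmt.Println("operationExecutor.MountVolume started for", v) // would enqueue a mount
		}
	}
	for v := range actual {
		if !desired[v] {
			fmt.Println("operationExecutor.UnmountVolume started for", v) // would enqueue an unmount
		}
	}
}

func main() {
	// Hypothetical names: one CSI-backed PVC plus two emptyDir volumes.
	desired := map[string]bool{"pvc-example": true, "catalog-content": true, "utilities": true}
	actual := map[string]bool{"stale-volume": true}
	reconcile(desired, actual)
}
```

Each difference becomes an independent operation, so one failing volume never blocks the others; that independence is exactly what the log shows.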
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.377216 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.377546 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8668558-9b23-4195-9816-7f9034a699e8-utilities\") pod \"community-operators-wnqwb\" (UID: \"d8668558-9b23-4195-9816-7f9034a699e8\") " pod="openshift-marketplace/community-operators-wnqwb"
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.377599 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8668558-9b23-4195-9816-7f9034a699e8-catalog-content\") pod \"community-operators-wnqwb\" (UID: \"d8668558-9b23-4195-9816-7f9034a699e8\") " pod="openshift-marketplace/community-operators-wnqwb"
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.377618 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5f5w2\" (UniqueName: \"kubernetes.io/projected/d8668558-9b23-4195-9816-7f9034a699e8-kube-api-access-5f5w2\") pod \"community-operators-wnqwb\" (UID: \"d8668558-9b23-4195-9816-7f9034a699e8\") " pod="openshift-marketplace/community-operators-wnqwb"
Feb 16 11:09:20 crc kubenswrapper[4949]: E0216 11:09:20.378137 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:20.878116123 +0000 UTC m=+150.507450288 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.378573 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8668558-9b23-4195-9816-7f9034a699e8-utilities\") pod \"community-operators-wnqwb\" (UID: \"d8668558-9b23-4195-9816-7f9034a699e8\") " pod="openshift-marketplace/community-operators-wnqwb"
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.378810 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8668558-9b23-4195-9816-7f9034a699e8-catalog-content\") pod \"community-operators-wnqwb\" (UID: \"d8668558-9b23-4195-9816-7f9034a699e8\") " pod="openshift-marketplace/community-operators-wnqwb"
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.381033 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-x9l5x"]
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.386751 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-x9l5x"
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.406697 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-x9l5x"]
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.429189 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5f5w2\" (UniqueName: \"kubernetes.io/projected/d8668558-9b23-4195-9816-7f9034a699e8-kube-api-access-5f5w2\") pod \"community-operators-wnqwb\" (UID: \"d8668558-9b23-4195-9816-7f9034a699e8\") " pod="openshift-marketplace/community-operators-wnqwb"
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.476636 4949 patch_prober.go:28] interesting pod/router-default-5444994796-t47hp container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Feb 16 11:09:20 crc kubenswrapper[4949]: [-]has-synced failed: reason withheld
Feb 16 11:09:20 crc kubenswrapper[4949]: [+]process-running ok
Feb 16 11:09:20 crc kubenswrapper[4949]: healthz check failed
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.477222 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t47hp" podUID="048dcd85-c085-40e8-b952-d97abe29ac36" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.482344 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5108144a-c7ec-4cd4-b792-eb6a943dce19-catalog-content\") pod \"certified-operators-x9l5x\" (UID: \"5108144a-c7ec-4cd4-b792-eb6a943dce19\") " pod="openshift-marketplace/certified-operators-x9l5x"
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.482459 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5108144a-c7ec-4cd4-b792-eb6a943dce19-utilities\") pod \"certified-operators-x9l5x\" (UID: \"5108144a-c7ec-4cd4-b792-eb6a943dce19\") " pod="openshift-marketplace/certified-operators-x9l5x"
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.482509 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x"
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.482573 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-45wcd\" (UniqueName: \"kubernetes.io/projected/5108144a-c7ec-4cd4-b792-eb6a943dce19-kube-api-access-45wcd\") pod \"certified-operators-x9l5x\" (UID: \"5108144a-c7ec-4cd4-b792-eb6a943dce19\") " pod="openshift-marketplace/certified-operators-x9l5x"
Feb 16 11:09:20 crc kubenswrapper[4949]: E0216 11:09:20.483006 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-16 11:09:20.98298996 +0000 UTC m=+150.612324125 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzx7x" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.559192 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wnqwb"
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.601463 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.601737 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5108144a-c7ec-4cd4-b792-eb6a943dce19-utilities\") pod \"certified-operators-x9l5x\" (UID: \"5108144a-c7ec-4cd4-b792-eb6a943dce19\") " pod="openshift-marketplace/certified-operators-x9l5x"
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.601798 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-45wcd\" (UniqueName: \"kubernetes.io/projected/5108144a-c7ec-4cd4-b792-eb6a943dce19-kube-api-access-45wcd\") pod \"certified-operators-x9l5x\" (UID: \"5108144a-c7ec-4cd4-b792-eb6a943dce19\") " pod="openshift-marketplace/certified-operators-x9l5x"
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.601829 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5108144a-c7ec-4cd4-b792-eb6a943dce19-catalog-content\") pod \"certified-operators-x9l5x\" (UID: \"5108144a-c7ec-4cd4-b792-eb6a943dce19\") " pod="openshift-marketplace/certified-operators-x9l5x"
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.602653 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5108144a-c7ec-4cd4-b792-eb6a943dce19-utilities\") pod \"certified-operators-x9l5x\" (UID: \"5108144a-c7ec-4cd4-b792-eb6a943dce19\") " pod="openshift-marketplace/certified-operators-x9l5x"
Feb 16 11:09:20 crc kubenswrapper[4949]: E0216 11:09:20.602795 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:21.102755984 +0000 UTC m=+150.732090149 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.603274 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5108144a-c7ec-4cd4-b792-eb6a943dce19-catalog-content\") pod \"certified-operators-x9l5x\" (UID: \"5108144a-c7ec-4cd4-b792-eb6a943dce19\") " pod="openshift-marketplace/certified-operators-x9l5x"
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.658329 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-45wcd\" (UniqueName: \"kubernetes.io/projected/5108144a-c7ec-4cd4-b792-eb6a943dce19-kube-api-access-45wcd\") pod \"certified-operators-x9l5x\" (UID: \"5108144a-c7ec-4cd4-b792-eb6a943dce19\") " pod="openshift-marketplace/certified-operators-x9l5x"
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.710750 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-x9l5x"
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.711264 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x"
Feb 16 11:09:20 crc kubenswrapper[4949]: E0216 11:09:20.711719 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-16 11:09:21.211704418 +0000 UTC m=+150.841038583 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzx7x" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.711722 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-zsl98" event={"ID":"7ae00ecc-93bf-4b26-b6f8-3dfa67a511bf","Type":"ContainerStarted","Data":"3d7bde1062bb2a09fa6964154de846d280cc2a4bbcc8a3ce05600e1d1a16c2d3"}
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.720094 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-khb7r"
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.814280 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Feb 16 11:09:20 crc kubenswrapper[4949]: E0216 11:09:20.816661 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:21.316615567 +0000 UTC m=+150.945949732 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Feb 16 11:09:20 crc kubenswrapper[4949]: E0216 11:09:20.917430 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-16 11:09:21.417411756 +0000 UTC m=+151.046745921 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzx7x" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Feb 16 11:09:20 crc kubenswrapper[4949]: I0216 11:09:20.916962 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x"
Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.020481 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Feb 16 11:09:21 crc kubenswrapper[4949]: E0216 11:09:21.020860 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:21.520846018 +0000 UTC m=+151.150180183 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.122373 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x"
Feb 16 11:09:21 crc kubenswrapper[4949]: E0216 11:09:21.122777 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-16 11:09:21.622763062 +0000 UTC m=+151.252097227 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzx7x" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.166545 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.167384 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.171435 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n"
Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.173813 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt"
Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.189594 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-fcdq5"]
Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.214260 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.223345 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Feb 16 11:09:21 crc kubenswrapper[4949]: E0216 11:09:21.223620 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:21.723605573 +0000 UTC m=+151.352939738 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Feb 16 11:09:21 crc kubenswrapper[4949]: W0216 11:09:21.291512 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37cad064_c760_43e0_8a5c_fb66fc774246.slice/crio-155a6dad4e1e4948b44ad4746ab755f15e081bca437333b9daa0e990cff48143 WatchSource:0}: Error finding container 155a6dad4e1e4948b44ad4746ab755f15e081bca437333b9daa0e990cff48143: Status 404 returned error can't find the container with id 155a6dad4e1e4948b44ad4746ab755f15e081bca437333b9daa0e990cff48143
Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.326032 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/8bde6ca1-e817-4de2-8b86-ca67a394915b-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"8bde6ca1-e817-4de2-8b86-ca67a394915b\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.326088 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/8bde6ca1-e817-4de2-8b86-ca67a394915b-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"8bde6ca1-e817-4de2-8b86-ca67a394915b\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.326111 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x"
Feb 16 11:09:21 crc kubenswrapper[4949]: E0216 11:09:21.326395 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-16 11:09:21.826383974 +0000 UTC m=+151.455718139 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzx7x" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
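For anyone mining a capture like this, each record is a journald prefix ("Feb 16 11:09:21 crc kubenswrapper[4949]:") followed by a klog header: severity letter (I/W/E/F), MMDD date, wall-clock time, PID, and source file:line. A small helper for slicing that header out is sketched below; the regex and field names are ours, not any kubelet API.

```go
// parse-klog.go: extract the klog header fields from a journald-wrapped line.
package main

import (
	"fmt"
	"regexp"
)

// severity, MMDD, HH:MM:SS.micros, pid, file.go:line
var klogRe = regexp.MustCompile(`([IWEF])(\d{4}) (\d{2}:\d{2}:\d{2}\.\d+)\s+(\d+) ([\w.]+:\d+)\]`)

func main() {
	line := `Feb 16 11:09:21 crc kubenswrapper[4949]: W0216 11:09:21.291512 4949 manager.go:1169] Failed to process watch event`
	m := klogRe.FindStringSubmatch(line)
	if m == nil {
		fmt.Println("no klog header found")
		return
	}
	fmt.Printf("severity=%s date=%s time=%s pid=%s source=%s\n", m[1], m[2], m[3], m[4], m[5])
	// prints: severity=W date=0216 time=11:09:21.291512 pid=4949 source=manager.go:1169
}
```

Grouping by the source field (reconciler_common.go, nestedpendingoperations.go, prober.go) is a quick way to separate the volume-retry noise from the rest of the startup traffic.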
Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.335284 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7r2kv"
Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.335325 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7r2kv"
Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.347716 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7r2kv"
Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.385867 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-w4598"]
Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.427153 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-tt99q"
Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.527905 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.536559 4949 patch_prober.go:28] interesting pod/router-default-5444994796-t47hp container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Feb 16 11:09:21 crc kubenswrapper[4949]: [-]has-synced failed: reason withheld
Feb 16 11:09:21 crc kubenswrapper[4949]: [+]process-running ok
Feb 16 11:09:21 crc kubenswrapper[4949]: healthz check failed
Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.536606 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t47hp" podUID="048dcd85-c085-40e8-b952-d97abe29ac36" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.537524 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-drxpj"
Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.541270 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/8bde6ca1-e817-4de2-8b86-ca67a394915b-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"8bde6ca1-e817-4de2-8b86-ca67a394915b\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Feb 16 11:09:21 crc kubenswrapper[4949]: E0216 11:09:21.560453 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:22.060385969 +0000 UTC m=+151.689720124 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.562180 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/8bde6ca1-e817-4de2-8b86-ca67a394915b-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"8bde6ca1-e817-4de2-8b86-ca67a394915b\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.562318 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x"
Feb 16 11:09:21 crc kubenswrapper[4949]: E0216 11:09:21.563508 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-16 11:09:22.063482836 +0000 UTC m=+151.692817001 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzx7x" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.568990 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/8bde6ca1-e817-4de2-8b86-ca67a394915b-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"8bde6ca1-e817-4de2-8b86-ca67a394915b\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.620206 4949 patch_prober.go:28] interesting pod/downloads-7954f5f757-kwq47 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body=
Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.620307 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-kwq47" podUID="89406f96-f3ec-4323-bb6a-c42175151f9d" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused"
Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.621454 4949 patch_prober.go:28] interesting pod/downloads-7954f5f757-kwq47 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body=
Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.621483 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-kwq47" podUID="89406f96-f3ec-4323-bb6a-c42175151f9d" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused"
Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.678626 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Feb 16 11:09:21 crc kubenswrapper[4949]: E0216 11:09:21.680127 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:22.180106911 +0000 UTC m=+151.809441076 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.723902 4949 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock"
Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.727329 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-vm5md"
Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.728999 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-vm5md"
Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.731104 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-zsl98" event={"ID":"7ae00ecc-93bf-4b26-b6f8-3dfa67a511bf","Type":"ContainerStarted","Data":"b068f13789807a27c3fedbe48a4f442d23b3b071d70a0bfa780dfe7f15bc9efb"}
Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.732989 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fcdq5" event={"ID":"37cad064-c760-43e0-8a5c-fb66fc774246","Type":"ContainerStarted","Data":"155a6dad4e1e4948b44ad4746ab755f15e081bca437333b9daa0e990cff48143"}
Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.745837 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"1326405d92f9e06bea298e3c9bb238ad64b12406ffd6e534f988de99465b34e7"}
Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.746581 4949 patch_prober.go:28] interesting pod/apiserver-76f77b778f-vm5md container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok
probe failed with statuscode: 500" start-of-body=[+]ping ok Feb 16 11:09:21 crc kubenswrapper[4949]: [+]log ok Feb 16 11:09:21 crc kubenswrapper[4949]: [+]etcd ok Feb 16 11:09:21 crc kubenswrapper[4949]: [+]poststarthook/start-apiserver-admission-initializer ok Feb 16 11:09:21 crc kubenswrapper[4949]: [+]poststarthook/generic-apiserver-start-informers ok Feb 16 11:09:21 crc kubenswrapper[4949]: [+]poststarthook/max-in-flight-filter ok Feb 16 11:09:21 crc kubenswrapper[4949]: [+]poststarthook/storage-object-count-tracker-hook ok Feb 16 11:09:21 crc kubenswrapper[4949]: [+]poststarthook/image.openshift.io-apiserver-caches ok Feb 16 11:09:21 crc kubenswrapper[4949]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld Feb 16 11:09:21 crc kubenswrapper[4949]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld Feb 16 11:09:21 crc kubenswrapper[4949]: [+]poststarthook/project.openshift.io-projectcache ok Feb 16 11:09:21 crc kubenswrapper[4949]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Feb 16 11:09:21 crc kubenswrapper[4949]: [+]poststarthook/openshift.io-startinformers ok Feb 16 11:09:21 crc kubenswrapper[4949]: [+]poststarthook/openshift.io-restmapperupdater ok Feb 16 11:09:21 crc kubenswrapper[4949]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Feb 16 11:09:21 crc kubenswrapper[4949]: livez check failed Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.746699 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-vm5md" podUID="a398cced-c30a-4638-96c2-c7fa84672dab" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.749206 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w4598" event={"ID":"73a35f90-b2ec-4518-b927-844e164a8531","Type":"ContainerStarted","Data":"ada7bdc9cbafda6faf3dec2384e7005bc52889809c1f57d378e5b237b78412af"} Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.767571 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-c2gtt"] Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.770295 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/8bde6ca1-e817-4de2-8b86-ca67a394915b-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"8bde6ca1-e817-4de2-8b86-ca67a394915b\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.771118 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-c2gtt" Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.772485 4949 generic.go:334] "Generic (PLEG): container finished" podID="dc6af30d-b0c6-47f1-90d2-9d297a7d2b23" containerID="ced7e0acb5315baa549cbfb4b821348a6f1b8f552adb2f7597fe780b7fe351fa" exitCode=0 Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.773117 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29520660-45x59" event={"ID":"dc6af30d-b0c6-47f1-90d2-9d297a7d2b23","Type":"ContainerDied","Data":"ced7e0acb5315baa549cbfb4b821348a6f1b8f552adb2f7597fe780b7fe351fa"} Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.774963 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.782059 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:21 crc kubenswrapper[4949]: E0216 11:09:21.782545 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-16 11:09:22.282528041 +0000 UTC m=+151.911862206 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzx7x" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.783606 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-c2gtt"] Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.815240 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7r2kv" Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.887431 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.887971 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5hn2z\" (UniqueName: \"kubernetes.io/projected/19e05ba4-d60e-479d-ae62-6853917d7537-kube-api-access-5hn2z\") pod \"redhat-marketplace-c2gtt\" (UID: \"19e05ba4-d60e-479d-ae62-6853917d7537\") " pod="openshift-marketplace/redhat-marketplace-c2gtt" Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.888078 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/19e05ba4-d60e-479d-ae62-6853917d7537-catalog-content\") pod \"redhat-marketplace-c2gtt\" (UID: \"19e05ba4-d60e-479d-ae62-6853917d7537\") " pod="openshift-marketplace/redhat-marketplace-c2gtt" Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.888158 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/19e05ba4-d60e-479d-ae62-6853917d7537-utilities\") pod \"redhat-marketplace-c2gtt\" (UID: \"19e05ba4-d60e-479d-ae62-6853917d7537\") " pod="openshift-marketplace/redhat-marketplace-c2gtt" Feb 16 11:09:21 crc kubenswrapper[4949]: E0216 11:09:21.889111 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:22.389089171 +0000 UTC m=+152.018423336 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.891986 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.926870 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wnqwb"] Feb 16 11:09:21 crc kubenswrapper[4949]: I0216 11:09:21.952753 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-x9l5x"] Feb 16 11:09:21 crc kubenswrapper[4949]: W0216 11:09:21.981945 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd8668558_9b23_4195_9816_7f9034a699e8.slice/crio-9b1c7f98d327be6b58c2e7011b854b22cb132f1b1aa5d43fb3b63251a98896b7 WatchSource:0}: Error finding container 9b1c7f98d327be6b58c2e7011b854b22cb132f1b1aa5d43fb3b63251a98896b7: Status 404 returned error can't find the container with id 9b1c7f98d327be6b58c2e7011b854b22cb132f1b1aa5d43fb3b63251a98896b7 Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:21.993142 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5hn2z\" (UniqueName: \"kubernetes.io/projected/19e05ba4-d60e-479d-ae62-6853917d7537-kube-api-access-5hn2z\") pod \"redhat-marketplace-c2gtt\" (UID: \"19e05ba4-d60e-479d-ae62-6853917d7537\") " pod="openshift-marketplace/redhat-marketplace-c2gtt" Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:21.993294 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/19e05ba4-d60e-479d-ae62-6853917d7537-catalog-content\") pod \"redhat-marketplace-c2gtt\" (UID: \"19e05ba4-d60e-479d-ae62-6853917d7537\") " pod="openshift-marketplace/redhat-marketplace-c2gtt" Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:21.993312 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/19e05ba4-d60e-479d-ae62-6853917d7537-utilities\") pod \"redhat-marketplace-c2gtt\" (UID: \"19e05ba4-d60e-479d-ae62-6853917d7537\") " pod="openshift-marketplace/redhat-marketplace-c2gtt" Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:21.993386 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:22 crc kubenswrapper[4949]: E0216 11:09:21.993720 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-16 11:09:22.49370669 +0000 UTC m=+152.123040855 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzx7x" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:21.993958 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/19e05ba4-d60e-479d-ae62-6853917d7537-catalog-content\") pod \"redhat-marketplace-c2gtt\" (UID: \"19e05ba4-d60e-479d-ae62-6853917d7537\") " pod="openshift-marketplace/redhat-marketplace-c2gtt" Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:21.993965 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/19e05ba4-d60e-479d-ae62-6853917d7537-utilities\") pod \"redhat-marketplace-c2gtt\" (UID: \"19e05ba4-d60e-479d-ae62-6853917d7537\") " pod="openshift-marketplace/redhat-marketplace-c2gtt" Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.023782 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5hn2z\" (UniqueName: \"kubernetes.io/projected/19e05ba4-d60e-479d-ae62-6853917d7537-kube-api-access-5hn2z\") pod \"redhat-marketplace-c2gtt\" (UID: \"19e05ba4-d60e-479d-ae62-6853917d7537\") " pod="openshift-marketplace/redhat-marketplace-c2gtt" Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.094811 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 16 11:09:22 crc kubenswrapper[4949]: E0216 11:09:22.094993 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-16 11:09:22.594950563 +0000 UTC m=+152.224284738 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.096662 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:22 crc kubenswrapper[4949]: E0216 11:09:22.097260 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-16 11:09:22.597244795 +0000 UTC m=+152.226578960 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzx7x" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.139659 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-c2gtt" Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.142914 4949 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2026-02-16T11:09:21.723925364Z","Handler":null,"Name":""} Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.192911 4949 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.192956 4949 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.193978 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-ccdhx"] Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.196385 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ccdhx" Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.200044 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.242790 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ccdhx"] Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.255680 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.305390 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67b23a8b-bf6c-4d65-9359-7ba9ffe71216-utilities\") pod \"redhat-marketplace-ccdhx\" (UID: \"67b23a8b-bf6c-4d65-9359-7ba9ffe71216\") " pod="openshift-marketplace/redhat-marketplace-ccdhx" Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.305541 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.305563 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.305586 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g6zwb\" (UniqueName: \"kubernetes.io/projected/67b23a8b-bf6c-4d65-9359-7ba9ffe71216-kube-api-access-g6zwb\") pod \"redhat-marketplace-ccdhx\" (UID: \"67b23a8b-bf6c-4d65-9359-7ba9ffe71216\") " pod="openshift-marketplace/redhat-marketplace-ccdhx" Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.305862 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67b23a8b-bf6c-4d65-9359-7ba9ffe71216-catalog-content\") pod \"redhat-marketplace-ccdhx\" (UID: \"67b23a8b-bf6c-4d65-9359-7ba9ffe71216\") " pod="openshift-marketplace/redhat-marketplace-ccdhx" Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.319774 4949 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.319836 4949 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:22 crc kubenswrapper[4949]: W0216 11:09:22.353766 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod8bde6ca1_e817_4de2_8b86_ca67a394915b.slice/crio-78dd0ed7bb48dfcd66c11ae4f5ddc28da40bd15d28f886c6f100917d6308f19b WatchSource:0}: Error finding container 78dd0ed7bb48dfcd66c11ae4f5ddc28da40bd15d28f886c6f100917d6308f19b: Status 404 returned error can't find the container with id 78dd0ed7bb48dfcd66c11ae4f5ddc28da40bd15d28f886c6f100917d6308f19b Feb 16 11:09:22 crc kubenswrapper[4949]: E0216 11:09:22.367774 4949 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod73a35f90_b2ec_4518_b927_844e164a8531.slice/crio-1ebf5ef8e503da4de58a111a7a9586e39292b7eda7d29f3183a92e9469ba16a9.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod73a35f90_b2ec_4518_b927_844e164a8531.slice/crio-conmon-1ebf5ef8e503da4de58a111a7a9586e39292b7eda7d29f3183a92e9469ba16a9.scope\": RecentStats: unable to find data in memory cache]" Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.381786 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzx7x\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.407474 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67b23a8b-bf6c-4d65-9359-7ba9ffe71216-catalog-content\") pod \"redhat-marketplace-ccdhx\" (UID: \"67b23a8b-bf6c-4d65-9359-7ba9ffe71216\") " pod="openshift-marketplace/redhat-marketplace-ccdhx" Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.407545 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67b23a8b-bf6c-4d65-9359-7ba9ffe71216-utilities\") pod \"redhat-marketplace-ccdhx\" (UID: \"67b23a8b-bf6c-4d65-9359-7ba9ffe71216\") " pod="openshift-marketplace/redhat-marketplace-ccdhx" Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.407939 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g6zwb\" (UniqueName: \"kubernetes.io/projected/67b23a8b-bf6c-4d65-9359-7ba9ffe71216-kube-api-access-g6zwb\") pod \"redhat-marketplace-ccdhx\" (UID: \"67b23a8b-bf6c-4d65-9359-7ba9ffe71216\") " pod="openshift-marketplace/redhat-marketplace-ccdhx" Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.407969 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67b23a8b-bf6c-4d65-9359-7ba9ffe71216-catalog-content\") pod \"redhat-marketplace-ccdhx\" (UID: \"67b23a8b-bf6c-4d65-9359-7ba9ffe71216\") " pod="openshift-marketplace/redhat-marketplace-ccdhx" Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.408492 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67b23a8b-bf6c-4d65-9359-7ba9ffe71216-utilities\") pod \"redhat-marketplace-ccdhx\" (UID: \"67b23a8b-bf6c-4d65-9359-7ba9ffe71216\") " pod="openshift-marketplace/redhat-marketplace-ccdhx" Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.436548 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g6zwb\" (UniqueName: \"kubernetes.io/projected/67b23a8b-bf6c-4d65-9359-7ba9ffe71216-kube-api-access-g6zwb\") pod \"redhat-marketplace-ccdhx\" (UID: \"67b23a8b-bf6c-4d65-9359-7ba9ffe71216\") " pod="openshift-marketplace/redhat-marketplace-ccdhx" Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.461348 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-5xfq9" Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.470274 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-5xfq9" Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.470350 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-t47hp" Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.473588 4949 patch_prober.go:28] interesting pod/router-default-5444994796-t47hp container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 16 11:09:22 crc kubenswrapper[4949]: [-]has-synced failed: reason withheld Feb 16 11:09:22 crc kubenswrapper[4949]: [+]process-running ok Feb 16 11:09:22 crc kubenswrapper[4949]: healthz check failed Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.473670 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t47hp" podUID="048dcd85-c085-40e8-b952-d97abe29ac36" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.480710 4949 patch_prober.go:28] interesting pod/console-f9d7485db-5xfq9 container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.30:8443/health\": dial tcp 10.217.0.30:8443: connect: connection refused" start-of-body= Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.480778 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-5xfq9" podUID="468461de-4a56-47b0-a5a9-cf6e51b6de47" containerName="console" probeResult="failure" output="Get \"https://10.217.0.30:8443/health\": dial tcp 10.217.0.30:8443: connect: connection refused" Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.566454 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.658296 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-c2gtt"] Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.708522 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ccdhx" Feb 16 11:09:22 crc kubenswrapper[4949]: W0216 11:09:22.712425 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod19e05ba4_d60e_479d_ae62_6853917d7537.slice/crio-1972a4e79f1728013337bf3616f75cd0c1ff61c6b2230cfb331830fa04abb675 WatchSource:0}: Error finding container 1972a4e79f1728013337bf3616f75cd0c1ff61c6b2230cfb331830fa04abb675: Status 404 returned error can't find the container with id 1972a4e79f1728013337bf3616f75cd0c1ff61c6b2230cfb331830fa04abb675 Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.790030 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c2gtt" event={"ID":"19e05ba4-d60e-479d-ae62-6853917d7537","Type":"ContainerStarted","Data":"1972a4e79f1728013337bf3616f75cd0c1ff61c6b2230cfb331830fa04abb675"} Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.793289 4949 generic.go:334] "Generic (PLEG): container finished" podID="37cad064-c760-43e0-8a5c-fb66fc774246" containerID="29f0364b579646e95a62816d324031feb34ca7345cfc3a4c0ac07a0d99913a5e" exitCode=0 Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.793400 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fcdq5" event={"ID":"37cad064-c760-43e0-8a5c-fb66fc774246","Type":"ContainerDied","Data":"29f0364b579646e95a62816d324031feb34ca7345cfc3a4c0ac07a0d99913a5e"} Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.794887 4949 generic.go:334] "Generic (PLEG): container finished" podID="5108144a-c7ec-4cd4-b792-eb6a943dce19" containerID="a9f9147dad2fba507a459e7be8a07e318b5d909428430941b48fea1d940b7091" exitCode=0 Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.794927 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x9l5x" event={"ID":"5108144a-c7ec-4cd4-b792-eb6a943dce19","Type":"ContainerDied","Data":"a9f9147dad2fba507a459e7be8a07e318b5d909428430941b48fea1d940b7091"} Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.794943 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x9l5x" event={"ID":"5108144a-c7ec-4cd4-b792-eb6a943dce19","Type":"ContainerStarted","Data":"613011fab2f3833612d672ea0b604205fbce494f015b5727a0457e79986a956b"} Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.796451 4949 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.803861 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"8bde6ca1-e817-4de2-8b86-ca67a394915b","Type":"ContainerStarted","Data":"78dd0ed7bb48dfcd66c11ae4f5ddc28da40bd15d28f886c6f100917d6308f19b"} Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.812182 4949 generic.go:334] "Generic (PLEG): container finished" podID="d8668558-9b23-4195-9816-7f9034a699e8" containerID="5f47c1d7893a103493b9c82a4cf23f467d14a83fb73b2944a174cb2b8afddb94" exitCode=0 Feb 16 
11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.812265 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wnqwb" event={"ID":"d8668558-9b23-4195-9816-7f9034a699e8","Type":"ContainerDied","Data":"5f47c1d7893a103493b9c82a4cf23f467d14a83fb73b2944a174cb2b8afddb94"} Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.812294 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wnqwb" event={"ID":"d8668558-9b23-4195-9816-7f9034a699e8","Type":"ContainerStarted","Data":"9b1c7f98d327be6b58c2e7011b854b22cb132f1b1aa5d43fb3b63251a98896b7"} Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.829942 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"7f0fcc7a5952e8c4354abd7e9fe3b64ea1b9288bd11546b97a28e7c9b53772dd"} Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.830003 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"477bf54ca9a84bb13ba286d06d1fa8b54df365a3f42b2aca246097dfff2f2c73"} Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.854676 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-zsl98" event={"ID":"7ae00ecc-93bf-4b26-b6f8-3dfa67a511bf","Type":"ContainerStarted","Data":"bd5e8ff741f8061698fc0020e48e8aad196d8ef9e0d882a3c8ade2e3a65c8936"} Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.857545 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"9f1b6d308f810595b784dc211d5d2bc9ecba0f99fb91271f390e486c79f65faf"} Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.860812 4949 generic.go:334] "Generic (PLEG): container finished" podID="73a35f90-b2ec-4518-b927-844e164a8531" containerID="1ebf5ef8e503da4de58a111a7a9586e39292b7eda7d29f3183a92e9469ba16a9" exitCode=0 Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.861163 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w4598" event={"ID":"73a35f90-b2ec-4518-b927-844e164a8531","Type":"ContainerDied","Data":"1ebf5ef8e503da4de58a111a7a9586e39292b7eda7d29f3183a92e9469ba16a9"} Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.871902 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"0d20fd8c8c46e074f9682d8465478168abe8dd4817335b4f116d431e699de1b6"} Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.871993 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"baefc66e3dd78e5cf9ed99a11cb6bbb04583f2af8cabfdf407addd7b7c70f1a6"} Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.872484 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.944930 4949 pod_startup_latency_tracker.go:104] "Observed pod 
startup duration" pod="hostpath-provisioner/csi-hostpathplugin-zsl98" podStartSLOduration=12.944904264 podStartE2EDuration="12.944904264s" podCreationTimestamp="2026-02-16 11:09:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:09:22.933117464 +0000 UTC m=+152.562451639" watchObservedRunningTime="2026-02-16 11:09:22.944904264 +0000 UTC m=+152.574238429" Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.963822 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-2jvc5"] Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.965222 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2jvc5" Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.971741 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Feb 16 11:09:22 crc kubenswrapper[4949]: I0216 11:09:22.988997 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2jvc5"] Feb 16 11:09:23 crc kubenswrapper[4949]: I0216 11:09:23.012466 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-kzx7x"] Feb 16 11:09:23 crc kubenswrapper[4949]: W0216 11:09:23.107367 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1d09a8d4_1225_4b5a_94ee_149b7c0fe01e.slice/crio-8213291aafa07829d745958cf0e5d25cb120f2ba76c934aad396faec08f84684 WatchSource:0}: Error finding container 8213291aafa07829d745958cf0e5d25cb120f2ba76c934aad396faec08f84684: Status 404 returned error can't find the container with id 8213291aafa07829d745958cf0e5d25cb120f2ba76c934aad396faec08f84684 Feb 16 11:09:23 crc kubenswrapper[4949]: I0216 11:09:23.120559 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-js2w6\" (UniqueName: \"kubernetes.io/projected/c2716c39-f511-47e9-a400-94cb1cd5ba42-kube-api-access-js2w6\") pod \"redhat-operators-2jvc5\" (UID: \"c2716c39-f511-47e9-a400-94cb1cd5ba42\") " pod="openshift-marketplace/redhat-operators-2jvc5" Feb 16 11:09:23 crc kubenswrapper[4949]: I0216 11:09:23.120662 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c2716c39-f511-47e9-a400-94cb1cd5ba42-utilities\") pod \"redhat-operators-2jvc5\" (UID: \"c2716c39-f511-47e9-a400-94cb1cd5ba42\") " pod="openshift-marketplace/redhat-operators-2jvc5" Feb 16 11:09:23 crc kubenswrapper[4949]: I0216 11:09:23.120919 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c2716c39-f511-47e9-a400-94cb1cd5ba42-catalog-content\") pod \"redhat-operators-2jvc5\" (UID: \"c2716c39-f511-47e9-a400-94cb1cd5ba42\") " pod="openshift-marketplace/redhat-operators-2jvc5" Feb 16 11:09:23 crc kubenswrapper[4949]: I0216 11:09:23.213614 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ccdhx"] Feb 16 11:09:23 crc kubenswrapper[4949]: I0216 11:09:23.222355 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-js2w6\" (UniqueName: 
\"kubernetes.io/projected/c2716c39-f511-47e9-a400-94cb1cd5ba42-kube-api-access-js2w6\") pod \"redhat-operators-2jvc5\" (UID: \"c2716c39-f511-47e9-a400-94cb1cd5ba42\") " pod="openshift-marketplace/redhat-operators-2jvc5" Feb 16 11:09:23 crc kubenswrapper[4949]: I0216 11:09:23.222435 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c2716c39-f511-47e9-a400-94cb1cd5ba42-utilities\") pod \"redhat-operators-2jvc5\" (UID: \"c2716c39-f511-47e9-a400-94cb1cd5ba42\") " pod="openshift-marketplace/redhat-operators-2jvc5" Feb 16 11:09:23 crc kubenswrapper[4949]: I0216 11:09:23.222499 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c2716c39-f511-47e9-a400-94cb1cd5ba42-catalog-content\") pod \"redhat-operators-2jvc5\" (UID: \"c2716c39-f511-47e9-a400-94cb1cd5ba42\") " pod="openshift-marketplace/redhat-operators-2jvc5" Feb 16 11:09:23 crc kubenswrapper[4949]: I0216 11:09:23.223034 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c2716c39-f511-47e9-a400-94cb1cd5ba42-catalog-content\") pod \"redhat-operators-2jvc5\" (UID: \"c2716c39-f511-47e9-a400-94cb1cd5ba42\") " pod="openshift-marketplace/redhat-operators-2jvc5" Feb 16 11:09:23 crc kubenswrapper[4949]: I0216 11:09:23.223822 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c2716c39-f511-47e9-a400-94cb1cd5ba42-utilities\") pod \"redhat-operators-2jvc5\" (UID: \"c2716c39-f511-47e9-a400-94cb1cd5ba42\") " pod="openshift-marketplace/redhat-operators-2jvc5" Feb 16 11:09:23 crc kubenswrapper[4949]: I0216 11:09:23.248964 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Feb 16 11:09:23 crc kubenswrapper[4949]: I0216 11:09:23.253206 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-js2w6\" (UniqueName: \"kubernetes.io/projected/c2716c39-f511-47e9-a400-94cb1cd5ba42-kube-api-access-js2w6\") pod \"redhat-operators-2jvc5\" (UID: \"c2716c39-f511-47e9-a400-94cb1cd5ba42\") " pod="openshift-marketplace/redhat-operators-2jvc5" Feb 16 11:09:23 crc kubenswrapper[4949]: I0216 11:09:23.308980 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2jvc5" Feb 16 11:09:23 crc kubenswrapper[4949]: I0216 11:09:23.320575 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29520660-45x59" Feb 16 11:09:23 crc kubenswrapper[4949]: I0216 11:09:23.407232 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-rzk2z"] Feb 16 11:09:23 crc kubenswrapper[4949]: E0216 11:09:23.407487 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc6af30d-b0c6-47f1-90d2-9d297a7d2b23" containerName="collect-profiles" Feb 16 11:09:23 crc kubenswrapper[4949]: I0216 11:09:23.407503 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc6af30d-b0c6-47f1-90d2-9d297a7d2b23" containerName="collect-profiles" Feb 16 11:09:23 crc kubenswrapper[4949]: I0216 11:09:23.407644 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc6af30d-b0c6-47f1-90d2-9d297a7d2b23" containerName="collect-profiles" Feb 16 11:09:23 crc kubenswrapper[4949]: I0216 11:09:23.408536 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rzk2z" Feb 16 11:09:23 crc kubenswrapper[4949]: I0216 11:09:23.425258 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-682n5\" (UniqueName: \"kubernetes.io/projected/dc6af30d-b0c6-47f1-90d2-9d297a7d2b23-kube-api-access-682n5\") pod \"dc6af30d-b0c6-47f1-90d2-9d297a7d2b23\" (UID: \"dc6af30d-b0c6-47f1-90d2-9d297a7d2b23\") " Feb 16 11:09:23 crc kubenswrapper[4949]: I0216 11:09:23.425298 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/dc6af30d-b0c6-47f1-90d2-9d297a7d2b23-config-volume\") pod \"dc6af30d-b0c6-47f1-90d2-9d297a7d2b23\" (UID: \"dc6af30d-b0c6-47f1-90d2-9d297a7d2b23\") " Feb 16 11:09:23 crc kubenswrapper[4949]: I0216 11:09:23.425446 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/dc6af30d-b0c6-47f1-90d2-9d297a7d2b23-secret-volume\") pod \"dc6af30d-b0c6-47f1-90d2-9d297a7d2b23\" (UID: \"dc6af30d-b0c6-47f1-90d2-9d297a7d2b23\") " Feb 16 11:09:23 crc kubenswrapper[4949]: I0216 11:09:23.425536 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rzk2z"] Feb 16 11:09:23 crc kubenswrapper[4949]: I0216 11:09:23.427011 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dc6af30d-b0c6-47f1-90d2-9d297a7d2b23-config-volume" (OuterVolumeSpecName: "config-volume") pod "dc6af30d-b0c6-47f1-90d2-9d297a7d2b23" (UID: "dc6af30d-b0c6-47f1-90d2-9d297a7d2b23"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:09:23 crc kubenswrapper[4949]: I0216 11:09:23.437506 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dc6af30d-b0c6-47f1-90d2-9d297a7d2b23-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "dc6af30d-b0c6-47f1-90d2-9d297a7d2b23" (UID: "dc6af30d-b0c6-47f1-90d2-9d297a7d2b23"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:09:23 crc kubenswrapper[4949]: I0216 11:09:23.437514 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc6af30d-b0c6-47f1-90d2-9d297a7d2b23-kube-api-access-682n5" (OuterVolumeSpecName: "kube-api-access-682n5") pod "dc6af30d-b0c6-47f1-90d2-9d297a7d2b23" (UID: "dc6af30d-b0c6-47f1-90d2-9d297a7d2b23"). 
InnerVolumeSpecName "kube-api-access-682n5". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:09:23 crc kubenswrapper[4949]: I0216 11:09:23.465009 4949 patch_prober.go:28] interesting pod/router-default-5444994796-t47hp container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 16 11:09:23 crc kubenswrapper[4949]: [-]has-synced failed: reason withheld Feb 16 11:09:23 crc kubenswrapper[4949]: [+]process-running ok Feb 16 11:09:23 crc kubenswrapper[4949]: healthz check failed Feb 16 11:09:23 crc kubenswrapper[4949]: I0216 11:09:23.465071 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t47hp" podUID="048dcd85-c085-40e8-b952-d97abe29ac36" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 16 11:09:23 crc kubenswrapper[4949]: I0216 11:09:23.527371 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7e4a6cf5-3cdc-45de-964d-cb39392b09a3-utilities\") pod \"redhat-operators-rzk2z\" (UID: \"7e4a6cf5-3cdc-45de-964d-cb39392b09a3\") " pod="openshift-marketplace/redhat-operators-rzk2z" Feb 16 11:09:23 crc kubenswrapper[4949]: I0216 11:09:23.527442 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fqqzr\" (UniqueName: \"kubernetes.io/projected/7e4a6cf5-3cdc-45de-964d-cb39392b09a3-kube-api-access-fqqzr\") pod \"redhat-operators-rzk2z\" (UID: \"7e4a6cf5-3cdc-45de-964d-cb39392b09a3\") " pod="openshift-marketplace/redhat-operators-rzk2z" Feb 16 11:09:23 crc kubenswrapper[4949]: I0216 11:09:23.527512 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7e4a6cf5-3cdc-45de-964d-cb39392b09a3-catalog-content\") pod \"redhat-operators-rzk2z\" (UID: \"7e4a6cf5-3cdc-45de-964d-cb39392b09a3\") " pod="openshift-marketplace/redhat-operators-rzk2z" Feb 16 11:09:23 crc kubenswrapper[4949]: I0216 11:09:23.527565 4949 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/dc6af30d-b0c6-47f1-90d2-9d297a7d2b23-secret-volume\") on node \"crc\" DevicePath \"\"" Feb 16 11:09:23 crc kubenswrapper[4949]: I0216 11:09:23.527583 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-682n5\" (UniqueName: \"kubernetes.io/projected/dc6af30d-b0c6-47f1-90d2-9d297a7d2b23-kube-api-access-682n5\") on node \"crc\" DevicePath \"\"" Feb 16 11:09:23 crc kubenswrapper[4949]: I0216 11:09:23.527598 4949 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/dc6af30d-b0c6-47f1-90d2-9d297a7d2b23-config-volume\") on node \"crc\" DevicePath \"\"" Feb 16 11:09:23 crc kubenswrapper[4949]: I0216 11:09:23.593641 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2jvc5"] Feb 16 11:09:23 crc kubenswrapper[4949]: W0216 11:09:23.618691 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc2716c39_f511_47e9_a400_94cb1cd5ba42.slice/crio-7e8cf975ca616732517d61ab3753ca67da4d2472dfe92eb384bbb22efa0c8686 WatchSource:0}: Error finding container 
7e8cf975ca616732517d61ab3753ca67da4d2472dfe92eb384bbb22efa0c8686: Status 404 returned error can't find the container with id 7e8cf975ca616732517d61ab3753ca67da4d2472dfe92eb384bbb22efa0c8686 Feb 16 11:09:23 crc kubenswrapper[4949]: I0216 11:09:23.629546 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7e4a6cf5-3cdc-45de-964d-cb39392b09a3-catalog-content\") pod \"redhat-operators-rzk2z\" (UID: \"7e4a6cf5-3cdc-45de-964d-cb39392b09a3\") " pod="openshift-marketplace/redhat-operators-rzk2z" Feb 16 11:09:23 crc kubenswrapper[4949]: I0216 11:09:23.629803 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7e4a6cf5-3cdc-45de-964d-cb39392b09a3-utilities\") pod \"redhat-operators-rzk2z\" (UID: \"7e4a6cf5-3cdc-45de-964d-cb39392b09a3\") " pod="openshift-marketplace/redhat-operators-rzk2z" Feb 16 11:09:23 crc kubenswrapper[4949]: I0216 11:09:23.629882 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fqqzr\" (UniqueName: \"kubernetes.io/projected/7e4a6cf5-3cdc-45de-964d-cb39392b09a3-kube-api-access-fqqzr\") pod \"redhat-operators-rzk2z\" (UID: \"7e4a6cf5-3cdc-45de-964d-cb39392b09a3\") " pod="openshift-marketplace/redhat-operators-rzk2z" Feb 16 11:09:23 crc kubenswrapper[4949]: I0216 11:09:23.630982 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7e4a6cf5-3cdc-45de-964d-cb39392b09a3-utilities\") pod \"redhat-operators-rzk2z\" (UID: \"7e4a6cf5-3cdc-45de-964d-cb39392b09a3\") " pod="openshift-marketplace/redhat-operators-rzk2z" Feb 16 11:09:23 crc kubenswrapper[4949]: I0216 11:09:23.638223 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7e4a6cf5-3cdc-45de-964d-cb39392b09a3-catalog-content\") pod \"redhat-operators-rzk2z\" (UID: \"7e4a6cf5-3cdc-45de-964d-cb39392b09a3\") " pod="openshift-marketplace/redhat-operators-rzk2z" Feb 16 11:09:23 crc kubenswrapper[4949]: I0216 11:09:23.652483 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fqqzr\" (UniqueName: \"kubernetes.io/projected/7e4a6cf5-3cdc-45de-964d-cb39392b09a3-kube-api-access-fqqzr\") pod \"redhat-operators-rzk2z\" (UID: \"7e4a6cf5-3cdc-45de-964d-cb39392b09a3\") " pod="openshift-marketplace/redhat-operators-rzk2z" Feb 16 11:09:23 crc kubenswrapper[4949]: I0216 11:09:23.735053 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rzk2z" Feb 16 11:09:23 crc kubenswrapper[4949]: I0216 11:09:23.877985 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ccdhx" event={"ID":"67b23a8b-bf6c-4d65-9359-7ba9ffe71216","Type":"ContainerStarted","Data":"d4a5934ee78d299f172ca87798a5d501a4dc4e8a898301f4b2ec431585113b1e"} Feb 16 11:09:23 crc kubenswrapper[4949]: I0216 11:09:23.879555 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29520660-45x59" event={"ID":"dc6af30d-b0c6-47f1-90d2-9d297a7d2b23","Type":"ContainerDied","Data":"42b65871004c00b4cf69f142a929de242955c36313b0c18ce6fee72e09c440e4"} Feb 16 11:09:23 crc kubenswrapper[4949]: I0216 11:09:23.879606 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29520660-45x59" Feb 16 11:09:23 crc kubenswrapper[4949]: I0216 11:09:23.879610 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="42b65871004c00b4cf69f142a929de242955c36313b0c18ce6fee72e09c440e4" Feb 16 11:09:23 crc kubenswrapper[4949]: I0216 11:09:23.884639 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"8bde6ca1-e817-4de2-8b86-ca67a394915b","Type":"ContainerStarted","Data":"482d78542fde00428ef46202db92833a0260cc9a7fb8626f7356e58477a32889"} Feb 16 11:09:23 crc kubenswrapper[4949]: I0216 11:09:23.889085 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" event={"ID":"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e","Type":"ContainerStarted","Data":"8213291aafa07829d745958cf0e5d25cb120f2ba76c934aad396faec08f84684"} Feb 16 11:09:23 crc kubenswrapper[4949]: I0216 11:09:23.891676 4949 generic.go:334] "Generic (PLEG): container finished" podID="19e05ba4-d60e-479d-ae62-6853917d7537" containerID="fa86e4c6f1ac09150b3aeb77b4d5cd2882f8a3eb6e4d1320ca5810b14d53f932" exitCode=0 Feb 16 11:09:23 crc kubenswrapper[4949]: I0216 11:09:23.892053 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c2gtt" event={"ID":"19e05ba4-d60e-479d-ae62-6853917d7537","Type":"ContainerDied","Data":"fa86e4c6f1ac09150b3aeb77b4d5cd2882f8a3eb6e4d1320ca5810b14d53f932"} Feb 16 11:09:23 crc kubenswrapper[4949]: I0216 11:09:23.894965 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2jvc5" event={"ID":"c2716c39-f511-47e9-a400-94cb1cd5ba42","Type":"ContainerStarted","Data":"7e8cf975ca616732517d61ab3753ca67da4d2472dfe92eb384bbb22efa0c8686"} Feb 16 11:09:24 crc kubenswrapper[4949]: I0216 11:09:24.137940 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rzk2z"] Feb 16 11:09:24 crc kubenswrapper[4949]: W0216 11:09:24.152680 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7e4a6cf5_3cdc_45de_964d_cb39392b09a3.slice/crio-373c2113acfe58cd8bc37feaa8fdf4d01b10b6e37d9833166268bd965c4d8217 WatchSource:0}: Error finding container 373c2113acfe58cd8bc37feaa8fdf4d01b10b6e37d9833166268bd965c4d8217: Status 404 returned error can't find the container with id 373c2113acfe58cd8bc37feaa8fdf4d01b10b6e37d9833166268bd965c4d8217 Feb 16 11:09:24 crc kubenswrapper[4949]: I0216 11:09:24.464196 4949 patch_prober.go:28] interesting pod/router-default-5444994796-t47hp container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 16 11:09:24 crc kubenswrapper[4949]: [-]has-synced failed: reason withheld Feb 16 11:09:24 crc kubenswrapper[4949]: [+]process-running ok Feb 16 11:09:24 crc kubenswrapper[4949]: healthz check failed Feb 16 11:09:24 crc kubenswrapper[4949]: I0216 11:09:24.464284 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t47hp" podUID="048dcd85-c085-40e8-b952-d97abe29ac36" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 16 11:09:24 crc kubenswrapper[4949]: I0216 11:09:24.901752 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" event={"ID":"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e","Type":"ContainerStarted","Data":"699f54fd17fb88fc82829c56f831b8594c14a8a6ff7f231bc79278ebde3963d5"} Feb 16 11:09:24 crc kubenswrapper[4949]: I0216 11:09:24.902071 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:24 crc kubenswrapper[4949]: I0216 11:09:24.903563 4949 generic.go:334] "Generic (PLEG): container finished" podID="c2716c39-f511-47e9-a400-94cb1cd5ba42" containerID="4201e2f03bb1a0ccd08baea4a1384c0bfeec656318d2647f8d8427e86441155e" exitCode=0 Feb 16 11:09:24 crc kubenswrapper[4949]: I0216 11:09:24.903628 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2jvc5" event={"ID":"c2716c39-f511-47e9-a400-94cb1cd5ba42","Type":"ContainerDied","Data":"4201e2f03bb1a0ccd08baea4a1384c0bfeec656318d2647f8d8427e86441155e"} Feb 16 11:09:24 crc kubenswrapper[4949]: I0216 11:09:24.917021 4949 generic.go:334] "Generic (PLEG): container finished" podID="67b23a8b-bf6c-4d65-9359-7ba9ffe71216" containerID="1a98d63d4db63c567746e3b28866fdf9e07ab1bdced169aa4098d558953d47d1" exitCode=0 Feb 16 11:09:24 crc kubenswrapper[4949]: I0216 11:09:24.917154 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ccdhx" event={"ID":"67b23a8b-bf6c-4d65-9359-7ba9ffe71216","Type":"ContainerDied","Data":"1a98d63d4db63c567746e3b28866fdf9e07ab1bdced169aa4098d558953d47d1"} Feb 16 11:09:24 crc kubenswrapper[4949]: I0216 11:09:24.921136 4949 generic.go:334] "Generic (PLEG): container finished" podID="8bde6ca1-e817-4de2-8b86-ca67a394915b" containerID="482d78542fde00428ef46202db92833a0260cc9a7fb8626f7356e58477a32889" exitCode=0 Feb 16 11:09:24 crc kubenswrapper[4949]: I0216 11:09:24.921210 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"8bde6ca1-e817-4de2-8b86-ca67a394915b","Type":"ContainerDied","Data":"482d78542fde00428ef46202db92833a0260cc9a7fb8626f7356e58477a32889"} Feb 16 11:09:24 crc kubenswrapper[4949]: I0216 11:09:24.923039 4949 generic.go:334] "Generic (PLEG): container finished" podID="7e4a6cf5-3cdc-45de-964d-cb39392b09a3" containerID="9e01da32733c13e7ed0fa15e674c06ed6382fc07111b0a4c3503f2028ff15ecd" exitCode=0 Feb 16 11:09:24 crc kubenswrapper[4949]: I0216 11:09:24.923076 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rzk2z" event={"ID":"7e4a6cf5-3cdc-45de-964d-cb39392b09a3","Type":"ContainerDied","Data":"9e01da32733c13e7ed0fa15e674c06ed6382fc07111b0a4c3503f2028ff15ecd"} Feb 16 11:09:24 crc kubenswrapper[4949]: I0216 11:09:24.923098 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rzk2z" event={"ID":"7e4a6cf5-3cdc-45de-964d-cb39392b09a3","Type":"ContainerStarted","Data":"373c2113acfe58cd8bc37feaa8fdf4d01b10b6e37d9833166268bd965c4d8217"} Feb 16 11:09:24 crc kubenswrapper[4949]: I0216 11:09:24.966550 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" podStartSLOduration=132.966526937 podStartE2EDuration="2m12.966526937s" podCreationTimestamp="2026-02-16 11:07:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:09:24.938116426 +0000 UTC m=+154.567450621" 
watchObservedRunningTime="2026-02-16 11:09:24.966526937 +0000 UTC m=+154.595861102" Feb 16 11:09:25 crc kubenswrapper[4949]: I0216 11:09:25.462264 4949 patch_prober.go:28] interesting pod/router-default-5444994796-t47hp container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 16 11:09:25 crc kubenswrapper[4949]: [-]has-synced failed: reason withheld Feb 16 11:09:25 crc kubenswrapper[4949]: [+]process-running ok Feb 16 11:09:25 crc kubenswrapper[4949]: healthz check failed Feb 16 11:09:25 crc kubenswrapper[4949]: I0216 11:09:25.462338 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t47hp" podUID="048dcd85-c085-40e8-b952-d97abe29ac36" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 16 11:09:26 crc kubenswrapper[4949]: I0216 11:09:26.391570 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Feb 16 11:09:26 crc kubenswrapper[4949]: I0216 11:09:26.393120 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Feb 16 11:09:26 crc kubenswrapper[4949]: I0216 11:09:26.396429 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Feb 16 11:09:26 crc kubenswrapper[4949]: I0216 11:09:26.405383 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Feb 16 11:09:26 crc kubenswrapper[4949]: I0216 11:09:26.407489 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Feb 16 11:09:26 crc kubenswrapper[4949]: I0216 11:09:26.463640 4949 patch_prober.go:28] interesting pod/router-default-5444994796-t47hp container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 16 11:09:26 crc kubenswrapper[4949]: [-]has-synced failed: reason withheld Feb 16 11:09:26 crc kubenswrapper[4949]: [+]process-running ok Feb 16 11:09:26 crc kubenswrapper[4949]: healthz check failed Feb 16 11:09:26 crc kubenswrapper[4949]: I0216 11:09:26.463729 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t47hp" podUID="048dcd85-c085-40e8-b952-d97abe29ac36" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 16 11:09:26 crc kubenswrapper[4949]: I0216 11:09:26.487080 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bc0d788b-8672-4dd1-a93b-9531c15e50d9-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"bc0d788b-8672-4dd1-a93b-9531c15e50d9\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Feb 16 11:09:26 crc kubenswrapper[4949]: I0216 11:09:26.487145 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/bc0d788b-8672-4dd1-a93b-9531c15e50d9-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"bc0d788b-8672-4dd1-a93b-9531c15e50d9\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Feb 16 11:09:26 crc kubenswrapper[4949]: I0216 11:09:26.487704 4949 util.go:48] "No ready 
sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Feb 16 11:09:26 crc kubenswrapper[4949]: I0216 11:09:26.588288 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/8bde6ca1-e817-4de2-8b86-ca67a394915b-kubelet-dir\") pod \"8bde6ca1-e817-4de2-8b86-ca67a394915b\" (UID: \"8bde6ca1-e817-4de2-8b86-ca67a394915b\") " Feb 16 11:09:26 crc kubenswrapper[4949]: I0216 11:09:26.588355 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/8bde6ca1-e817-4de2-8b86-ca67a394915b-kube-api-access\") pod \"8bde6ca1-e817-4de2-8b86-ca67a394915b\" (UID: \"8bde6ca1-e817-4de2-8b86-ca67a394915b\") " Feb 16 11:09:26 crc kubenswrapper[4949]: I0216 11:09:26.588588 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bc0d788b-8672-4dd1-a93b-9531c15e50d9-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"bc0d788b-8672-4dd1-a93b-9531c15e50d9\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Feb 16 11:09:26 crc kubenswrapper[4949]: I0216 11:09:26.588644 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/bc0d788b-8672-4dd1-a93b-9531c15e50d9-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"bc0d788b-8672-4dd1-a93b-9531c15e50d9\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Feb 16 11:09:26 crc kubenswrapper[4949]: I0216 11:09:26.588747 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/bc0d788b-8672-4dd1-a93b-9531c15e50d9-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"bc0d788b-8672-4dd1-a93b-9531c15e50d9\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Feb 16 11:09:26 crc kubenswrapper[4949]: I0216 11:09:26.588819 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8bde6ca1-e817-4de2-8b86-ca67a394915b-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "8bde6ca1-e817-4de2-8b86-ca67a394915b" (UID: "8bde6ca1-e817-4de2-8b86-ca67a394915b"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 16 11:09:26 crc kubenswrapper[4949]: I0216 11:09:26.615117 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bc0d788b-8672-4dd1-a93b-9531c15e50d9-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"bc0d788b-8672-4dd1-a93b-9531c15e50d9\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Feb 16 11:09:26 crc kubenswrapper[4949]: I0216 11:09:26.624947 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8bde6ca1-e817-4de2-8b86-ca67a394915b-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "8bde6ca1-e817-4de2-8b86-ca67a394915b" (UID: "8bde6ca1-e817-4de2-8b86-ca67a394915b"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:09:26 crc kubenswrapper[4949]: I0216 11:09:26.689631 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/8bde6ca1-e817-4de2-8b86-ca67a394915b-kube-api-access\") on node \"crc\" DevicePath \"\"" Feb 16 11:09:26 crc kubenswrapper[4949]: I0216 11:09:26.689663 4949 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/8bde6ca1-e817-4de2-8b86-ca67a394915b-kubelet-dir\") on node \"crc\" DevicePath \"\"" Feb 16 11:09:26 crc kubenswrapper[4949]: I0216 11:09:26.734048 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-vm5md" Feb 16 11:09:26 crc kubenswrapper[4949]: I0216 11:09:26.741454 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-vm5md" Feb 16 11:09:26 crc kubenswrapper[4949]: I0216 11:09:26.776659 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Feb 16 11:09:27 crc kubenswrapper[4949]: I0216 11:09:27.048336 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"8bde6ca1-e817-4de2-8b86-ca67a394915b","Type":"ContainerDied","Data":"78dd0ed7bb48dfcd66c11ae4f5ddc28da40bd15d28f886c6f100917d6308f19b"} Feb 16 11:09:27 crc kubenswrapper[4949]: I0216 11:09:27.048957 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="78dd0ed7bb48dfcd66c11ae4f5ddc28da40bd15d28f886c6f100917d6308f19b" Feb 16 11:09:27 crc kubenswrapper[4949]: I0216 11:09:27.048405 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Feb 16 11:09:27 crc kubenswrapper[4949]: I0216 11:09:27.292125 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Feb 16 11:09:27 crc kubenswrapper[4949]: W0216 11:09:27.304329 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-podbc0d788b_8672_4dd1_a93b_9531c15e50d9.slice/crio-0dfcd994601d0a7be49d2844aca8fe5e6f00d15c5975de6edaa66a30aa7c0683 WatchSource:0}: Error finding container 0dfcd994601d0a7be49d2844aca8fe5e6f00d15c5975de6edaa66a30aa7c0683: Status 404 returned error can't find the container with id 0dfcd994601d0a7be49d2844aca8fe5e6f00d15c5975de6edaa66a30aa7c0683 Feb 16 11:09:27 crc kubenswrapper[4949]: I0216 11:09:27.483650 4949 patch_prober.go:28] interesting pod/router-default-5444994796-t47hp container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 16 11:09:27 crc kubenswrapper[4949]: [-]has-synced failed: reason withheld Feb 16 11:09:27 crc kubenswrapper[4949]: [+]process-running ok Feb 16 11:09:27 crc kubenswrapper[4949]: healthz check failed Feb 16 11:09:27 crc kubenswrapper[4949]: I0216 11:09:27.483901 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t47hp" podUID="048dcd85-c085-40e8-b952-d97abe29ac36" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 16 11:09:28 crc kubenswrapper[4949]: I0216 11:09:28.078710 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"bc0d788b-8672-4dd1-a93b-9531c15e50d9","Type":"ContainerStarted","Data":"0dfcd994601d0a7be49d2844aca8fe5e6f00d15c5975de6edaa66a30aa7c0683"} Feb 16 11:09:28 crc kubenswrapper[4949]: I0216 11:09:28.466148 4949 patch_prober.go:28] interesting pod/router-default-5444994796-t47hp container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 16 11:09:28 crc kubenswrapper[4949]: [-]has-synced failed: reason withheld Feb 16 11:09:28 crc kubenswrapper[4949]: [+]process-running ok Feb 16 11:09:28 crc kubenswrapper[4949]: healthz check failed Feb 16 11:09:28 crc kubenswrapper[4949]: I0216 11:09:28.466264 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t47hp" podUID="048dcd85-c085-40e8-b952-d97abe29ac36" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 16 11:09:28 crc kubenswrapper[4949]: I0216 11:09:28.510356 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-5pbl4" Feb 16 11:09:29 crc kubenswrapper[4949]: I0216 11:09:29.158486 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"bc0d788b-8672-4dd1-a93b-9531c15e50d9","Type":"ContainerStarted","Data":"2079323ab008d718d8e17dcd4d3432a87a566413145dc5f52154c0a6f1febc86"} Feb 16 11:09:29 crc kubenswrapper[4949]: I0216 11:09:29.187471 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=3.187436501 podStartE2EDuration="3.187436501s" podCreationTimestamp="2026-02-16 11:09:26 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:09:29.17940771 +0000 UTC m=+158.808741885" watchObservedRunningTime="2026-02-16 11:09:29.187436501 +0000 UTC m=+158.816770666" Feb 16 11:09:29 crc kubenswrapper[4949]: I0216 11:09:29.462192 4949 patch_prober.go:28] interesting pod/router-default-5444994796-t47hp container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 16 11:09:29 crc kubenswrapper[4949]: [-]has-synced failed: reason withheld Feb 16 11:09:29 crc kubenswrapper[4949]: [+]process-running ok Feb 16 11:09:29 crc kubenswrapper[4949]: healthz check failed Feb 16 11:09:29 crc kubenswrapper[4949]: I0216 11:09:29.462316 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t47hp" podUID="048dcd85-c085-40e8-b952-d97abe29ac36" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 16 11:09:30 crc kubenswrapper[4949]: I0216 11:09:30.194245 4949 generic.go:334] "Generic (PLEG): container finished" podID="bc0d788b-8672-4dd1-a93b-9531c15e50d9" containerID="2079323ab008d718d8e17dcd4d3432a87a566413145dc5f52154c0a6f1febc86" exitCode=0 Feb 16 11:09:30 crc kubenswrapper[4949]: I0216 11:09:30.194292 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"bc0d788b-8672-4dd1-a93b-9531c15e50d9","Type":"ContainerDied","Data":"2079323ab008d718d8e17dcd4d3432a87a566413145dc5f52154c0a6f1febc86"} Feb 16 11:09:30 crc kubenswrapper[4949]: I0216 11:09:30.462036 4949 patch_prober.go:28] interesting pod/router-default-5444994796-t47hp container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 16 11:09:30 crc kubenswrapper[4949]: [-]has-synced failed: reason withheld Feb 16 11:09:30 crc kubenswrapper[4949]: [+]process-running ok Feb 16 11:09:30 crc kubenswrapper[4949]: healthz check failed Feb 16 11:09:30 crc kubenswrapper[4949]: I0216 11:09:30.462147 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t47hp" podUID="048dcd85-c085-40e8-b952-d97abe29ac36" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 16 11:09:31 crc kubenswrapper[4949]: I0216 11:09:31.484043 4949 patch_prober.go:28] interesting pod/router-default-5444994796-t47hp container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 16 11:09:31 crc kubenswrapper[4949]: [-]has-synced failed: reason withheld Feb 16 11:09:31 crc kubenswrapper[4949]: [+]process-running ok Feb 16 11:09:31 crc kubenswrapper[4949]: healthz check failed Feb 16 11:09:31 crc kubenswrapper[4949]: I0216 11:09:31.484725 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t47hp" podUID="048dcd85-c085-40e8-b952-d97abe29ac36" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 16 11:09:31 crc kubenswrapper[4949]: I0216 11:09:31.613681 4949 patch_prober.go:28] interesting pod/downloads-7954f5f757-kwq47 container/download-server namespace/openshift-console: Liveness probe status=failure 
output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Feb 16 11:09:31 crc kubenswrapper[4949]: I0216 11:09:31.613767 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-kwq47" podUID="89406f96-f3ec-4323-bb6a-c42175151f9d" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Feb 16 11:09:31 crc kubenswrapper[4949]: I0216 11:09:31.614040 4949 patch_prober.go:28] interesting pod/downloads-7954f5f757-kwq47 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Feb 16 11:09:31 crc kubenswrapper[4949]: I0216 11:09:31.614117 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-kwq47" podUID="89406f96-f3ec-4323-bb6a-c42175151f9d" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Feb 16 11:09:31 crc kubenswrapper[4949]: I0216 11:09:31.748776 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Feb 16 11:09:31 crc kubenswrapper[4949]: I0216 11:09:31.823008 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/bc0d788b-8672-4dd1-a93b-9531c15e50d9-kubelet-dir\") pod \"bc0d788b-8672-4dd1-a93b-9531c15e50d9\" (UID: \"bc0d788b-8672-4dd1-a93b-9531c15e50d9\") " Feb 16 11:09:31 crc kubenswrapper[4949]: I0216 11:09:31.823135 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bc0d788b-8672-4dd1-a93b-9531c15e50d9-kube-api-access\") pod \"bc0d788b-8672-4dd1-a93b-9531c15e50d9\" (UID: \"bc0d788b-8672-4dd1-a93b-9531c15e50d9\") " Feb 16 11:09:31 crc kubenswrapper[4949]: I0216 11:09:31.823150 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bc0d788b-8672-4dd1-a93b-9531c15e50d9-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "bc0d788b-8672-4dd1-a93b-9531c15e50d9" (UID: "bc0d788b-8672-4dd1-a93b-9531c15e50d9"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 16 11:09:31 crc kubenswrapper[4949]: I0216 11:09:31.823593 4949 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/bc0d788b-8672-4dd1-a93b-9531c15e50d9-kubelet-dir\") on node \"crc\" DevicePath \"\"" Feb 16 11:09:31 crc kubenswrapper[4949]: I0216 11:09:31.872011 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc0d788b-8672-4dd1-a93b-9531c15e50d9-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "bc0d788b-8672-4dd1-a93b-9531c15e50d9" (UID: "bc0d788b-8672-4dd1-a93b-9531c15e50d9"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:09:31 crc kubenswrapper[4949]: I0216 11:09:31.927849 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bc0d788b-8672-4dd1-a93b-9531c15e50d9-kube-api-access\") on node \"crc\" DevicePath \"\"" Feb 16 11:09:32 crc kubenswrapper[4949]: I0216 11:09:32.265401 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"bc0d788b-8672-4dd1-a93b-9531c15e50d9","Type":"ContainerDied","Data":"0dfcd994601d0a7be49d2844aca8fe5e6f00d15c5975de6edaa66a30aa7c0683"} Feb 16 11:09:32 crc kubenswrapper[4949]: I0216 11:09:32.265463 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0dfcd994601d0a7be49d2844aca8fe5e6f00d15c5975de6edaa66a30aa7c0683" Feb 16 11:09:32 crc kubenswrapper[4949]: I0216 11:09:32.265585 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Feb 16 11:09:32 crc kubenswrapper[4949]: I0216 11:09:32.459439 4949 patch_prober.go:28] interesting pod/console-f9d7485db-5xfq9 container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.30:8443/health\": dial tcp 10.217.0.30:8443: connect: connection refused" start-of-body= Feb 16 11:09:32 crc kubenswrapper[4949]: I0216 11:09:32.459512 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-5xfq9" podUID="468461de-4a56-47b0-a5a9-cf6e51b6de47" containerName="console" probeResult="failure" output="Get \"https://10.217.0.30:8443/health\": dial tcp 10.217.0.30:8443: connect: connection refused" Feb 16 11:09:32 crc kubenswrapper[4949]: I0216 11:09:32.467323 4949 patch_prober.go:28] interesting pod/router-default-5444994796-t47hp container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 16 11:09:32 crc kubenswrapper[4949]: [-]has-synced failed: reason withheld Feb 16 11:09:32 crc kubenswrapper[4949]: [+]process-running ok Feb 16 11:09:32 crc kubenswrapper[4949]: healthz check failed Feb 16 11:09:32 crc kubenswrapper[4949]: I0216 11:09:32.467391 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t47hp" podUID="048dcd85-c085-40e8-b952-d97abe29ac36" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 16 11:09:33 crc kubenswrapper[4949]: I0216 11:09:33.462944 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-t47hp" Feb 16 11:09:33 crc kubenswrapper[4949]: I0216 11:09:33.466645 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-t47hp" Feb 16 11:09:34 crc kubenswrapper[4949]: I0216 11:09:34.550994 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 16 11:09:34 crc kubenswrapper[4949]: I0216 11:09:34.551642 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" 
containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 16 11:09:35 crc kubenswrapper[4949]: I0216 11:09:35.223574 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/965b4f20-8786-4c47-8721-c348942551d6-metrics-certs\") pod \"network-metrics-daemon-6v4x7\" (UID: \"965b4f20-8786-4c47-8721-c348942551d6\") " pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:09:35 crc kubenswrapper[4949]: I0216 11:09:35.255855 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/965b4f20-8786-4c47-8721-c348942551d6-metrics-certs\") pod \"network-metrics-daemon-6v4x7\" (UID: \"965b4f20-8786-4c47-8721-c348942551d6\") " pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:09:35 crc kubenswrapper[4949]: I0216 11:09:35.261572 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6v4x7" Feb 16 11:09:35 crc kubenswrapper[4949]: I0216 11:09:35.723448 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-drxpj"] Feb 16 11:09:35 crc kubenswrapper[4949]: I0216 11:09:35.723654 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-drxpj" podUID="d2690ae1-0168-43e5-aa99-3e926f6979d8" containerName="controller-manager" containerID="cri-o://45be470dec9d32f09c00568509f058dfef333c5d781b4ba94484dce0c805464e" gracePeriod=30 Feb 16 11:09:35 crc kubenswrapper[4949]: I0216 11:09:35.745987 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-p2jx5"] Feb 16 11:09:35 crc kubenswrapper[4949]: I0216 11:09:35.747457 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p2jx5" podUID="0492d421-806b-48e4-8a97-3032888e370e" containerName="route-controller-manager" containerID="cri-o://065bbd95ddd3952eb45b48100cfb7573c85243ba02a3caaa76a856b67f1720ef" gracePeriod=30 Feb 16 11:09:37 crc kubenswrapper[4949]: I0216 11:09:37.333789 4949 generic.go:334] "Generic (PLEG): container finished" podID="d2690ae1-0168-43e5-aa99-3e926f6979d8" containerID="45be470dec9d32f09c00568509f058dfef333c5d781b4ba94484dce0c805464e" exitCode=0 Feb 16 11:09:37 crc kubenswrapper[4949]: I0216 11:09:37.333919 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-drxpj" event={"ID":"d2690ae1-0168-43e5-aa99-3e926f6979d8","Type":"ContainerDied","Data":"45be470dec9d32f09c00568509f058dfef333c5d781b4ba94484dce0c805464e"} Feb 16 11:09:41 crc kubenswrapper[4949]: I0216 11:09:41.469767 4949 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-drxpj container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.5:8443/healthz\": dial tcp 10.217.0.5:8443: connect: connection refused" start-of-body= Feb 16 11:09:41 crc kubenswrapper[4949]: I0216 11:09:41.469853 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-drxpj" podUID="d2690ae1-0168-43e5-aa99-3e926f6979d8" containerName="controller-manager" 
probeResult="failure" output="Get \"https://10.217.0.5:8443/healthz\": dial tcp 10.217.0.5:8443: connect: connection refused" Feb 16 11:09:41 crc kubenswrapper[4949]: I0216 11:09:41.613301 4949 patch_prober.go:28] interesting pod/downloads-7954f5f757-kwq47 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Feb 16 11:09:41 crc kubenswrapper[4949]: I0216 11:09:41.613363 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-kwq47" podUID="89406f96-f3ec-4323-bb6a-c42175151f9d" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Feb 16 11:09:41 crc kubenswrapper[4949]: I0216 11:09:41.613409 4949 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/downloads-7954f5f757-kwq47" Feb 16 11:09:41 crc kubenswrapper[4949]: I0216 11:09:41.613435 4949 patch_prober.go:28] interesting pod/downloads-7954f5f757-kwq47 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Feb 16 11:09:41 crc kubenswrapper[4949]: I0216 11:09:41.613538 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-kwq47" podUID="89406f96-f3ec-4323-bb6a-c42175151f9d" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Feb 16 11:09:41 crc kubenswrapper[4949]: I0216 11:09:41.614074 4949 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="download-server" containerStatusID={"Type":"cri-o","ID":"101a41d156e7d960d6955a37a690504c88c6b3b7aff45f1c234f73e7bdbe2a58"} pod="openshift-console/downloads-7954f5f757-kwq47" containerMessage="Container download-server failed liveness probe, will be restarted" Feb 16 11:09:41 crc kubenswrapper[4949]: I0216 11:09:41.614164 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/downloads-7954f5f757-kwq47" podUID="89406f96-f3ec-4323-bb6a-c42175151f9d" containerName="download-server" containerID="cri-o://101a41d156e7d960d6955a37a690504c88c6b3b7aff45f1c234f73e7bdbe2a58" gracePeriod=2 Feb 16 11:09:41 crc kubenswrapper[4949]: I0216 11:09:41.614294 4949 patch_prober.go:28] interesting pod/downloads-7954f5f757-kwq47 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Feb 16 11:09:41 crc kubenswrapper[4949]: I0216 11:09:41.614514 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-kwq47" podUID="89406f96-f3ec-4323-bb6a-c42175151f9d" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Feb 16 11:09:42 crc kubenswrapper[4949]: I0216 11:09:42.356047 4949 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-p2jx5 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.12:8443/healthz\": dial tcp 10.217.0.12:8443: connect: connection refused" 
start-of-body= Feb 16 11:09:42 crc kubenswrapper[4949]: I0216 11:09:42.356715 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p2jx5" podUID="0492d421-806b-48e4-8a97-3032888e370e" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.12:8443/healthz\": dial tcp 10.217.0.12:8443: connect: connection refused" Feb 16 11:09:42 crc kubenswrapper[4949]: I0216 11:09:42.457831 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-5xfq9" Feb 16 11:09:42 crc kubenswrapper[4949]: I0216 11:09:42.465921 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-5xfq9" Feb 16 11:09:42 crc kubenswrapper[4949]: I0216 11:09:42.574490 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.405840 4949 generic.go:334] "Generic (PLEG): container finished" podID="89406f96-f3ec-4323-bb6a-c42175151f9d" containerID="101a41d156e7d960d6955a37a690504c88c6b3b7aff45f1c234f73e7bdbe2a58" exitCode=0 Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.406339 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-kwq47" event={"ID":"89406f96-f3ec-4323-bb6a-c42175151f9d","Type":"ContainerDied","Data":"101a41d156e7d960d6955a37a690504c88c6b3b7aff45f1c234f73e7bdbe2a58"} Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.408535 4949 generic.go:334] "Generic (PLEG): container finished" podID="0492d421-806b-48e4-8a97-3032888e370e" containerID="065bbd95ddd3952eb45b48100cfb7573c85243ba02a3caaa76a856b67f1720ef" exitCode=0 Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.408605 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p2jx5" event={"ID":"0492d421-806b-48e4-8a97-3032888e370e","Type":"ContainerDied","Data":"065bbd95ddd3952eb45b48100cfb7573c85243ba02a3caaa76a856b67f1720ef"} Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.698106 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p2jx5" Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.703349 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-drxpj" Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.749169 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-846b55b649-9glbl"] Feb 16 11:09:43 crc kubenswrapper[4949]: E0216 11:09:43.749704 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0492d421-806b-48e4-8a97-3032888e370e" containerName="route-controller-manager" Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.749728 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="0492d421-806b-48e4-8a97-3032888e370e" containerName="route-controller-manager" Feb 16 11:09:43 crc kubenswrapper[4949]: E0216 11:09:43.749749 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2690ae1-0168-43e5-aa99-3e926f6979d8" containerName="controller-manager" Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.749761 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2690ae1-0168-43e5-aa99-3e926f6979d8" containerName="controller-manager" Feb 16 11:09:43 crc kubenswrapper[4949]: E0216 11:09:43.749798 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8bde6ca1-e817-4de2-8b86-ca67a394915b" containerName="pruner" Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.749808 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="8bde6ca1-e817-4de2-8b86-ca67a394915b" containerName="pruner" Feb 16 11:09:43 crc kubenswrapper[4949]: E0216 11:09:43.749825 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc0d788b-8672-4dd1-a93b-9531c15e50d9" containerName="pruner" Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.749837 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc0d788b-8672-4dd1-a93b-9531c15e50d9" containerName="pruner" Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.750165 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="d2690ae1-0168-43e5-aa99-3e926f6979d8" containerName="controller-manager" Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.750220 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="bc0d788b-8672-4dd1-a93b-9531c15e50d9" containerName="pruner" Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.750246 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="8bde6ca1-e817-4de2-8b86-ca67a394915b" containerName="pruner" Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.750258 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="0492d421-806b-48e4-8a97-3032888e370e" containerName="route-controller-manager" Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.753287 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-846b55b649-9glbl" Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.767773 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-846b55b649-9glbl"] Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.793067 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d2690ae1-0168-43e5-aa99-3e926f6979d8-client-ca\") pod \"d2690ae1-0168-43e5-aa99-3e926f6979d8\" (UID: \"d2690ae1-0168-43e5-aa99-3e926f6979d8\") " Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.793450 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d2690ae1-0168-43e5-aa99-3e926f6979d8-config\") pod \"d2690ae1-0168-43e5-aa99-3e926f6979d8\" (UID: \"d2690ae1-0168-43e5-aa99-3e926f6979d8\") " Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.793635 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t7pd6\" (UniqueName: \"kubernetes.io/projected/0492d421-806b-48e4-8a97-3032888e370e-kube-api-access-t7pd6\") pod \"0492d421-806b-48e4-8a97-3032888e370e\" (UID: \"0492d421-806b-48e4-8a97-3032888e370e\") " Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.793676 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0492d421-806b-48e4-8a97-3032888e370e-serving-cert\") pod \"0492d421-806b-48e4-8a97-3032888e370e\" (UID: \"0492d421-806b-48e4-8a97-3032888e370e\") " Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.793718 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0492d421-806b-48e4-8a97-3032888e370e-config\") pod \"0492d421-806b-48e4-8a97-3032888e370e\" (UID: \"0492d421-806b-48e4-8a97-3032888e370e\") " Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.793756 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d2690ae1-0168-43e5-aa99-3e926f6979d8-serving-cert\") pod \"d2690ae1-0168-43e5-aa99-3e926f6979d8\" (UID: \"d2690ae1-0168-43e5-aa99-3e926f6979d8\") " Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.793784 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0492d421-806b-48e4-8a97-3032888e370e-client-ca\") pod \"0492d421-806b-48e4-8a97-3032888e370e\" (UID: \"0492d421-806b-48e4-8a97-3032888e370e\") " Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.793807 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/d2690ae1-0168-43e5-aa99-3e926f6979d8-proxy-ca-bundles\") pod \"d2690ae1-0168-43e5-aa99-3e926f6979d8\" (UID: \"d2690ae1-0168-43e5-aa99-3e926f6979d8\") " Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.793863 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6lfbm\" (UniqueName: \"kubernetes.io/projected/d2690ae1-0168-43e5-aa99-3e926f6979d8-kube-api-access-6lfbm\") pod \"d2690ae1-0168-43e5-aa99-3e926f6979d8\" (UID: \"d2690ae1-0168-43e5-aa99-3e926f6979d8\") " Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.797669 4949 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d2690ae1-0168-43e5-aa99-3e926f6979d8-client-ca" (OuterVolumeSpecName: "client-ca") pod "d2690ae1-0168-43e5-aa99-3e926f6979d8" (UID: "d2690ae1-0168-43e5-aa99-3e926f6979d8"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.799489 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0492d421-806b-48e4-8a97-3032888e370e-client-ca" (OuterVolumeSpecName: "client-ca") pod "0492d421-806b-48e4-8a97-3032888e370e" (UID: "0492d421-806b-48e4-8a97-3032888e370e"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.799675 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0492d421-806b-48e4-8a97-3032888e370e-config" (OuterVolumeSpecName: "config") pod "0492d421-806b-48e4-8a97-3032888e370e" (UID: "0492d421-806b-48e4-8a97-3032888e370e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.799857 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d2690ae1-0168-43e5-aa99-3e926f6979d8-config" (OuterVolumeSpecName: "config") pod "d2690ae1-0168-43e5-aa99-3e926f6979d8" (UID: "d2690ae1-0168-43e5-aa99-3e926f6979d8"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.800922 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d2690ae1-0168-43e5-aa99-3e926f6979d8-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "d2690ae1-0168-43e5-aa99-3e926f6979d8" (UID: "d2690ae1-0168-43e5-aa99-3e926f6979d8"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.805948 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0492d421-806b-48e4-8a97-3032888e370e-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0492d421-806b-48e4-8a97-3032888e370e" (UID: "0492d421-806b-48e4-8a97-3032888e370e"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.806015 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0492d421-806b-48e4-8a97-3032888e370e-kube-api-access-t7pd6" (OuterVolumeSpecName: "kube-api-access-t7pd6") pod "0492d421-806b-48e4-8a97-3032888e370e" (UID: "0492d421-806b-48e4-8a97-3032888e370e"). InnerVolumeSpecName "kube-api-access-t7pd6". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.807792 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d2690ae1-0168-43e5-aa99-3e926f6979d8-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "d2690ae1-0168-43e5-aa99-3e926f6979d8" (UID: "d2690ae1-0168-43e5-aa99-3e926f6979d8"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.808286 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d2690ae1-0168-43e5-aa99-3e926f6979d8-kube-api-access-6lfbm" (OuterVolumeSpecName: "kube-api-access-6lfbm") pod "d2690ae1-0168-43e5-aa99-3e926f6979d8" (UID: "d2690ae1-0168-43e5-aa99-3e926f6979d8"). InnerVolumeSpecName "kube-api-access-6lfbm". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.895429 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/50648dae-4718-4e02-96f8-ccf78affa386-client-ca\") pod \"route-controller-manager-846b55b649-9glbl\" (UID: \"50648dae-4718-4e02-96f8-ccf78affa386\") " pod="openshift-route-controller-manager/route-controller-manager-846b55b649-9glbl" Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.895518 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50648dae-4718-4e02-96f8-ccf78affa386-config\") pod \"route-controller-manager-846b55b649-9glbl\" (UID: \"50648dae-4718-4e02-96f8-ccf78affa386\") " pod="openshift-route-controller-manager/route-controller-manager-846b55b649-9glbl" Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.895671 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/50648dae-4718-4e02-96f8-ccf78affa386-serving-cert\") pod \"route-controller-manager-846b55b649-9glbl\" (UID: \"50648dae-4718-4e02-96f8-ccf78affa386\") " pod="openshift-route-controller-manager/route-controller-manager-846b55b649-9glbl" Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.895727 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6g279\" (UniqueName: \"kubernetes.io/projected/50648dae-4718-4e02-96f8-ccf78affa386-kube-api-access-6g279\") pod \"route-controller-manager-846b55b649-9glbl\" (UID: \"50648dae-4718-4e02-96f8-ccf78affa386\") " pod="openshift-route-controller-manager/route-controller-manager-846b55b649-9glbl" Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.895770 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6lfbm\" (UniqueName: \"kubernetes.io/projected/d2690ae1-0168-43e5-aa99-3e926f6979d8-kube-api-access-6lfbm\") on node \"crc\" DevicePath \"\"" Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.895782 4949 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d2690ae1-0168-43e5-aa99-3e926f6979d8-client-ca\") on node \"crc\" DevicePath \"\"" Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.895792 4949 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d2690ae1-0168-43e5-aa99-3e926f6979d8-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.895803 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t7pd6\" (UniqueName: \"kubernetes.io/projected/0492d421-806b-48e4-8a97-3032888e370e-kube-api-access-t7pd6\") on node \"crc\" DevicePath \"\"" Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.895812 4949 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" 
(UniqueName: \"kubernetes.io/secret/0492d421-806b-48e4-8a97-3032888e370e-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.895820 4949 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0492d421-806b-48e4-8a97-3032888e370e-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.895829 4949 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d2690ae1-0168-43e5-aa99-3e926f6979d8-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.895911 4949 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0492d421-806b-48e4-8a97-3032888e370e-client-ca\") on node \"crc\" DevicePath \"\"" Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.895960 4949 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/d2690ae1-0168-43e5-aa99-3e926f6979d8-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.996773 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/50648dae-4718-4e02-96f8-ccf78affa386-serving-cert\") pod \"route-controller-manager-846b55b649-9glbl\" (UID: \"50648dae-4718-4e02-96f8-ccf78affa386\") " pod="openshift-route-controller-manager/route-controller-manager-846b55b649-9glbl" Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.996869 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6g279\" (UniqueName: \"kubernetes.io/projected/50648dae-4718-4e02-96f8-ccf78affa386-kube-api-access-6g279\") pod \"route-controller-manager-846b55b649-9glbl\" (UID: \"50648dae-4718-4e02-96f8-ccf78affa386\") " pod="openshift-route-controller-manager/route-controller-manager-846b55b649-9glbl" Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.996916 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/50648dae-4718-4e02-96f8-ccf78affa386-client-ca\") pod \"route-controller-manager-846b55b649-9glbl\" (UID: \"50648dae-4718-4e02-96f8-ccf78affa386\") " pod="openshift-route-controller-manager/route-controller-manager-846b55b649-9glbl" Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.996945 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50648dae-4718-4e02-96f8-ccf78affa386-config\") pod \"route-controller-manager-846b55b649-9glbl\" (UID: \"50648dae-4718-4e02-96f8-ccf78affa386\") " pod="openshift-route-controller-manager/route-controller-manager-846b55b649-9glbl" Feb 16 11:09:43 crc kubenswrapper[4949]: I0216 11:09:43.998656 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/50648dae-4718-4e02-96f8-ccf78affa386-client-ca\") pod \"route-controller-manager-846b55b649-9glbl\" (UID: \"50648dae-4718-4e02-96f8-ccf78affa386\") " pod="openshift-route-controller-manager/route-controller-manager-846b55b649-9glbl" Feb 16 11:09:44 crc kubenswrapper[4949]: I0216 11:09:44.000528 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/50648dae-4718-4e02-96f8-ccf78affa386-serving-cert\") pod \"route-controller-manager-846b55b649-9glbl\" (UID: \"50648dae-4718-4e02-96f8-ccf78affa386\") " pod="openshift-route-controller-manager/route-controller-manager-846b55b649-9glbl" Feb 16 11:09:44 crc kubenswrapper[4949]: I0216 11:09:44.002547 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50648dae-4718-4e02-96f8-ccf78affa386-config\") pod \"route-controller-manager-846b55b649-9glbl\" (UID: \"50648dae-4718-4e02-96f8-ccf78affa386\") " pod="openshift-route-controller-manager/route-controller-manager-846b55b649-9glbl" Feb 16 11:09:44 crc kubenswrapper[4949]: I0216 11:09:44.017712 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6g279\" (UniqueName: \"kubernetes.io/projected/50648dae-4718-4e02-96f8-ccf78affa386-kube-api-access-6g279\") pod \"route-controller-manager-846b55b649-9glbl\" (UID: \"50648dae-4718-4e02-96f8-ccf78affa386\") " pod="openshift-route-controller-manager/route-controller-manager-846b55b649-9glbl" Feb 16 11:09:44 crc kubenswrapper[4949]: I0216 11:09:44.075915 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-846b55b649-9glbl" Feb 16 11:09:44 crc kubenswrapper[4949]: I0216 11:09:44.421065 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p2jx5" Feb 16 11:09:44 crc kubenswrapper[4949]: I0216 11:09:44.421081 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p2jx5" event={"ID":"0492d421-806b-48e4-8a97-3032888e370e","Type":"ContainerDied","Data":"57e1e949c61f9c9df9a49e72f051dbbe968f57db9ef03c6eed54935eaea6adbd"} Feb 16 11:09:44 crc kubenswrapper[4949]: I0216 11:09:44.421747 4949 scope.go:117] "RemoveContainer" containerID="065bbd95ddd3952eb45b48100cfb7573c85243ba02a3caaa76a856b67f1720ef" Feb 16 11:09:44 crc kubenswrapper[4949]: I0216 11:09:44.430883 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-drxpj" event={"ID":"d2690ae1-0168-43e5-aa99-3e926f6979d8","Type":"ContainerDied","Data":"a31fdf74d29b0d02ab83860e1e23f9b26766b71db0545a29afd38b7fb2ff85a7"} Feb 16 11:09:44 crc kubenswrapper[4949]: I0216 11:09:44.431011 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-drxpj" Feb 16 11:09:44 crc kubenswrapper[4949]: I0216 11:09:44.498703 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-p2jx5"] Feb 16 11:09:44 crc kubenswrapper[4949]: I0216 11:09:44.502733 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-p2jx5"] Feb 16 11:09:44 crc kubenswrapper[4949]: I0216 11:09:44.523267 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-drxpj"] Feb 16 11:09:44 crc kubenswrapper[4949]: I0216 11:09:44.526260 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-drxpj"] Feb 16 11:09:45 crc kubenswrapper[4949]: I0216 11:09:45.249771 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0492d421-806b-48e4-8a97-3032888e370e" path="/var/lib/kubelet/pods/0492d421-806b-48e4-8a97-3032888e370e/volumes" Feb 16 11:09:45 crc kubenswrapper[4949]: I0216 11:09:45.251392 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d2690ae1-0168-43e5-aa99-3e926f6979d8" path="/var/lib/kubelet/pods/d2690ae1-0168-43e5-aa99-3e926f6979d8/volumes" Feb 16 11:09:46 crc kubenswrapper[4949]: I0216 11:09:46.217371 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-b547dd45d-7wc59"] Feb 16 11:09:46 crc kubenswrapper[4949]: I0216 11:09:46.220236 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-b547dd45d-7wc59" Feb 16 11:09:46 crc kubenswrapper[4949]: I0216 11:09:46.223952 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Feb 16 11:09:46 crc kubenswrapper[4949]: I0216 11:09:46.225389 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Feb 16 11:09:46 crc kubenswrapper[4949]: I0216 11:09:46.235475 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Feb 16 11:09:46 crc kubenswrapper[4949]: I0216 11:09:46.235513 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Feb 16 11:09:46 crc kubenswrapper[4949]: I0216 11:09:46.237745 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Feb 16 11:09:46 crc kubenswrapper[4949]: I0216 11:09:46.239741 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Feb 16 11:09:46 crc kubenswrapper[4949]: I0216 11:09:46.239803 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Feb 16 11:09:46 crc kubenswrapper[4949]: I0216 11:09:46.241516 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-b547dd45d-7wc59"] Feb 16 11:09:46 crc kubenswrapper[4949]: I0216 11:09:46.337835 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/03df7621-f424-4d6f-abd9-e56c2d3f8da6-proxy-ca-bundles\") pod 
\"controller-manager-b547dd45d-7wc59\" (UID: \"03df7621-f424-4d6f-abd9-e56c2d3f8da6\") " pod="openshift-controller-manager/controller-manager-b547dd45d-7wc59" Feb 16 11:09:46 crc kubenswrapper[4949]: I0216 11:09:46.337915 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/03df7621-f424-4d6f-abd9-e56c2d3f8da6-serving-cert\") pod \"controller-manager-b547dd45d-7wc59\" (UID: \"03df7621-f424-4d6f-abd9-e56c2d3f8da6\") " pod="openshift-controller-manager/controller-manager-b547dd45d-7wc59" Feb 16 11:09:46 crc kubenswrapper[4949]: I0216 11:09:46.338038 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vtpbl\" (UniqueName: \"kubernetes.io/projected/03df7621-f424-4d6f-abd9-e56c2d3f8da6-kube-api-access-vtpbl\") pod \"controller-manager-b547dd45d-7wc59\" (UID: \"03df7621-f424-4d6f-abd9-e56c2d3f8da6\") " pod="openshift-controller-manager/controller-manager-b547dd45d-7wc59" Feb 16 11:09:46 crc kubenswrapper[4949]: I0216 11:09:46.338069 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/03df7621-f424-4d6f-abd9-e56c2d3f8da6-config\") pod \"controller-manager-b547dd45d-7wc59\" (UID: \"03df7621-f424-4d6f-abd9-e56c2d3f8da6\") " pod="openshift-controller-manager/controller-manager-b547dd45d-7wc59" Feb 16 11:09:46 crc kubenswrapper[4949]: I0216 11:09:46.338092 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/03df7621-f424-4d6f-abd9-e56c2d3f8da6-client-ca\") pod \"controller-manager-b547dd45d-7wc59\" (UID: \"03df7621-f424-4d6f-abd9-e56c2d3f8da6\") " pod="openshift-controller-manager/controller-manager-b547dd45d-7wc59" Feb 16 11:09:46 crc kubenswrapper[4949]: I0216 11:09:46.438926 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/03df7621-f424-4d6f-abd9-e56c2d3f8da6-proxy-ca-bundles\") pod \"controller-manager-b547dd45d-7wc59\" (UID: \"03df7621-f424-4d6f-abd9-e56c2d3f8da6\") " pod="openshift-controller-manager/controller-manager-b547dd45d-7wc59" Feb 16 11:09:46 crc kubenswrapper[4949]: I0216 11:09:46.439034 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/03df7621-f424-4d6f-abd9-e56c2d3f8da6-serving-cert\") pod \"controller-manager-b547dd45d-7wc59\" (UID: \"03df7621-f424-4d6f-abd9-e56c2d3f8da6\") " pod="openshift-controller-manager/controller-manager-b547dd45d-7wc59" Feb 16 11:09:46 crc kubenswrapper[4949]: I0216 11:09:46.439131 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vtpbl\" (UniqueName: \"kubernetes.io/projected/03df7621-f424-4d6f-abd9-e56c2d3f8da6-kube-api-access-vtpbl\") pod \"controller-manager-b547dd45d-7wc59\" (UID: \"03df7621-f424-4d6f-abd9-e56c2d3f8da6\") " pod="openshift-controller-manager/controller-manager-b547dd45d-7wc59" Feb 16 11:09:46 crc kubenswrapper[4949]: I0216 11:09:46.439158 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/03df7621-f424-4d6f-abd9-e56c2d3f8da6-config\") pod \"controller-manager-b547dd45d-7wc59\" (UID: \"03df7621-f424-4d6f-abd9-e56c2d3f8da6\") " 
pod="openshift-controller-manager/controller-manager-b547dd45d-7wc59" Feb 16 11:09:46 crc kubenswrapper[4949]: I0216 11:09:46.439244 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/03df7621-f424-4d6f-abd9-e56c2d3f8da6-client-ca\") pod \"controller-manager-b547dd45d-7wc59\" (UID: \"03df7621-f424-4d6f-abd9-e56c2d3f8da6\") " pod="openshift-controller-manager/controller-manager-b547dd45d-7wc59" Feb 16 11:09:46 crc kubenswrapper[4949]: I0216 11:09:46.440362 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/03df7621-f424-4d6f-abd9-e56c2d3f8da6-client-ca\") pod \"controller-manager-b547dd45d-7wc59\" (UID: \"03df7621-f424-4d6f-abd9-e56c2d3f8da6\") " pod="openshift-controller-manager/controller-manager-b547dd45d-7wc59" Feb 16 11:09:46 crc kubenswrapper[4949]: I0216 11:09:46.440499 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/03df7621-f424-4d6f-abd9-e56c2d3f8da6-proxy-ca-bundles\") pod \"controller-manager-b547dd45d-7wc59\" (UID: \"03df7621-f424-4d6f-abd9-e56c2d3f8da6\") " pod="openshift-controller-manager/controller-manager-b547dd45d-7wc59" Feb 16 11:09:46 crc kubenswrapper[4949]: I0216 11:09:46.440907 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/03df7621-f424-4d6f-abd9-e56c2d3f8da6-config\") pod \"controller-manager-b547dd45d-7wc59\" (UID: \"03df7621-f424-4d6f-abd9-e56c2d3f8da6\") " pod="openshift-controller-manager/controller-manager-b547dd45d-7wc59" Feb 16 11:09:46 crc kubenswrapper[4949]: I0216 11:09:46.448956 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/03df7621-f424-4d6f-abd9-e56c2d3f8da6-serving-cert\") pod \"controller-manager-b547dd45d-7wc59\" (UID: \"03df7621-f424-4d6f-abd9-e56c2d3f8da6\") " pod="openshift-controller-manager/controller-manager-b547dd45d-7wc59" Feb 16 11:09:46 crc kubenswrapper[4949]: I0216 11:09:46.457302 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vtpbl\" (UniqueName: \"kubernetes.io/projected/03df7621-f424-4d6f-abd9-e56c2d3f8da6-kube-api-access-vtpbl\") pod \"controller-manager-b547dd45d-7wc59\" (UID: \"03df7621-f424-4d6f-abd9-e56c2d3f8da6\") " pod="openshift-controller-manager/controller-manager-b547dd45d-7wc59" Feb 16 11:09:46 crc kubenswrapper[4949]: I0216 11:09:46.568966 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-b547dd45d-7wc59" Feb 16 11:09:51 crc kubenswrapper[4949]: I0216 11:09:51.615634 4949 patch_prober.go:28] interesting pod/downloads-7954f5f757-kwq47 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Feb 16 11:09:51 crc kubenswrapper[4949]: I0216 11:09:51.616851 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-kwq47" podUID="89406f96-f3ec-4323-bb6a-c42175151f9d" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Feb 16 11:09:53 crc kubenswrapper[4949]: I0216 11:09:53.390789 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-l9xzk" Feb 16 11:09:55 crc kubenswrapper[4949]: E0216 11:09:55.291056 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Feb 16 11:09:55 crc kubenswrapper[4949]: E0216 11:09:55.291923 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jtvnm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-fcdq5_openshift-marketplace(37cad064-c760-43e0-8a5c-fb66fc774246): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Feb 16 11:09:55 crc kubenswrapper[4949]: E0216 11:09:55.293221 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" 
pod="openshift-marketplace/community-operators-fcdq5" podUID="37cad064-c760-43e0-8a5c-fb66fc774246" Feb 16 11:09:55 crc kubenswrapper[4949]: I0216 11:09:55.657653 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-b547dd45d-7wc59"] Feb 16 11:09:55 crc kubenswrapper[4949]: I0216 11:09:55.764349 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-846b55b649-9glbl"] Feb 16 11:09:56 crc kubenswrapper[4949]: E0216 11:09:56.856875 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-fcdq5" podUID="37cad064-c760-43e0-8a5c-fb66fc774246" Feb 16 11:09:56 crc kubenswrapper[4949]: E0216 11:09:56.926101 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Feb 16 11:09:56 crc kubenswrapper[4949]: E0216 11:09:56.926325 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-45wcd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-x9l5x_openshift-marketplace(5108144a-c7ec-4cd4-b792-eb6a943dce19): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Feb 16 11:09:56 crc kubenswrapper[4949]: E0216 11:09:56.928376 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-x9l5x" podUID="5108144a-c7ec-4cd4-b792-eb6a943dce19" Feb 16 11:09:56 crc kubenswrapper[4949]: 
E0216 11:09:56.935581 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Feb 16 11:09:56 crc kubenswrapper[4949]: E0216 11:09:56.935798 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5f5w2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-wnqwb_openshift-marketplace(d8668558-9b23-4195-9816-7f9034a699e8): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Feb 16 11:09:56 crc kubenswrapper[4949]: E0216 11:09:56.936975 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-wnqwb" podUID="d8668558-9b23-4195-9816-7f9034a699e8" Feb 16 11:09:58 crc kubenswrapper[4949]: E0216 11:09:58.177343 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-wnqwb" podUID="d8668558-9b23-4195-9816-7f9034a699e8" Feb 16 11:09:58 crc kubenswrapper[4949]: E0216 11:09:58.177743 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-x9l5x" podUID="5108144a-c7ec-4cd4-b792-eb6a943dce19" Feb 16 11:09:58 crc kubenswrapper[4949]: E0216 11:09:58.254481 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from 
manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Feb 16 11:09:58 crc kubenswrapper[4949]: E0216 11:09:58.254649 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-g6zwb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-ccdhx_openshift-marketplace(67b23a8b-bf6c-4d65-9359-7ba9ffe71216): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Feb 16 11:09:58 crc kubenswrapper[4949]: I0216 11:09:58.255184 4949 scope.go:117] "RemoveContainer" containerID="45be470dec9d32f09c00568509f058dfef333c5d781b4ba94484dce0c805464e" Feb 16 11:09:58 crc kubenswrapper[4949]: E0216 11:09:58.255777 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-ccdhx" podUID="67b23a8b-bf6c-4d65-9359-7ba9ffe71216" Feb 16 11:09:58 crc kubenswrapper[4949]: E0216 11:09:58.319065 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Feb 16 11:09:58 crc kubenswrapper[4949]: E0216 11:09:58.320580 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5hn2z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-c2gtt_openshift-marketplace(19e05ba4-d60e-479d-ae62-6853917d7537): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Feb 16 11:09:58 crc kubenswrapper[4949]: E0216 11:09:58.322843 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-c2gtt" podUID="19e05ba4-d60e-479d-ae62-6853917d7537" Feb 16 11:09:58 crc kubenswrapper[4949]: E0216 11:09:58.339900 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Feb 16 11:09:58 crc kubenswrapper[4949]: E0216 11:09:58.340047 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fqqzr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-rzk2z_openshift-marketplace(7e4a6cf5-3cdc-45de-964d-cb39392b09a3): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Feb 16 11:09:58 crc kubenswrapper[4949]: E0216 11:09:58.341505 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-rzk2z" podUID="7e4a6cf5-3cdc-45de-964d-cb39392b09a3" Feb 16 11:09:58 crc kubenswrapper[4949]: E0216 11:09:58.392065 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Feb 16 11:09:58 crc kubenswrapper[4949]: E0216 11:09:58.392315 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5mdzf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-w4598_openshift-marketplace(73a35f90-b2ec-4518-b927-844e164a8531): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Feb 16 11:09:58 crc kubenswrapper[4949]: E0216 11:09:58.393741 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-w4598" podUID="73a35f90-b2ec-4518-b927-844e164a8531" Feb 16 11:09:58 crc kubenswrapper[4949]: I0216 11:09:58.399530 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-6v4x7"] Feb 16 11:09:58 crc kubenswrapper[4949]: W0216 11:09:58.422268 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod965b4f20_8786_4c47_8721_c348942551d6.slice/crio-a778af45baf80b9eb28a6ca30ab8fb8ffd483e3f546134f037d88e6d6dfea231 WatchSource:0}: Error finding container a778af45baf80b9eb28a6ca30ab8fb8ffd483e3f546134f037d88e6d6dfea231: Status 404 returned error can't find the container with id a778af45baf80b9eb28a6ca30ab8fb8ffd483e3f546134f037d88e6d6dfea231 Feb 16 11:09:58 crc kubenswrapper[4949]: I0216 11:09:58.529475 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-846b55b649-9glbl"] Feb 16 11:09:58 crc kubenswrapper[4949]: I0216 11:09:58.542346 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-6v4x7" event={"ID":"965b4f20-8786-4c47-8721-c348942551d6","Type":"ContainerStarted","Data":"a778af45baf80b9eb28a6ca30ab8fb8ffd483e3f546134f037d88e6d6dfea231"} Feb 16 11:09:58 crc kubenswrapper[4949]: I0216 11:09:58.549658 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-kwq47" event={"ID":"89406f96-f3ec-4323-bb6a-c42175151f9d","Type":"ContainerStarted","Data":"419e40474bc61857f98194bc1a3dfc6b3b9b62ce9b971ba3178b7ea5823cb88b"} Feb 16 11:09:58 crc 
kubenswrapper[4949]: I0216 11:09:58.550121 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-kwq47" Feb 16 11:09:58 crc kubenswrapper[4949]: I0216 11:09:58.550687 4949 patch_prober.go:28] interesting pod/downloads-7954f5f757-kwq47 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Feb 16 11:09:58 crc kubenswrapper[4949]: I0216 11:09:58.550729 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-kwq47" podUID="89406f96-f3ec-4323-bb6a-c42175151f9d" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Feb 16 11:09:58 crc kubenswrapper[4949]: E0216 11:09:58.568471 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-w4598" podUID="73a35f90-b2ec-4518-b927-844e164a8531" Feb 16 11:09:58 crc kubenswrapper[4949]: E0216 11:09:58.568628 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-c2gtt" podUID="19e05ba4-d60e-479d-ae62-6853917d7537" Feb 16 11:09:58 crc kubenswrapper[4949]: W0216 11:09:58.569702 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod50648dae_4718_4e02_96f8_ccf78affa386.slice/crio-41c7d8a0b1cda06136324e2e10ae02cc4acc46dccc6cf94dacb183eb4fd17b85 WatchSource:0}: Error finding container 41c7d8a0b1cda06136324e2e10ae02cc4acc46dccc6cf94dacb183eb4fd17b85: Status 404 returned error can't find the container with id 41c7d8a0b1cda06136324e2e10ae02cc4acc46dccc6cf94dacb183eb4fd17b85 Feb 16 11:09:58 crc kubenswrapper[4949]: E0216 11:09:58.569943 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-ccdhx" podUID="67b23a8b-bf6c-4d65-9359-7ba9ffe71216" Feb 16 11:09:58 crc kubenswrapper[4949]: E0216 11:09:58.576679 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-rzk2z" podUID="7e4a6cf5-3cdc-45de-964d-cb39392b09a3" Feb 16 11:09:58 crc kubenswrapper[4949]: I0216 11:09:58.606134 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-b547dd45d-7wc59"] Feb 16 11:09:58 crc kubenswrapper[4949]: W0216 11:09:58.622589 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod03df7621_f424_4d6f_abd9_e56c2d3f8da6.slice/crio-10baa77bfebeb8a1300bbec04110b3657bcbe8d299d97e3d524ea2335bde02db WatchSource:0}: Error finding container 10baa77bfebeb8a1300bbec04110b3657bcbe8d299d97e3d524ea2335bde02db: Status 404 returned error can't find the container with id 10baa77bfebeb8a1300bbec04110b3657bcbe8d299d97e3d524ea2335bde02db
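The "Error syncing pod, skipping ... ImagePullBackOff" entries above are the second half of kubelet's standard pull-failure cycle: a pull that fails with ErrImagePull is retried under an exponential back-off, and pod syncs that land between attempts are skipped with ImagePullBackOff. A minimal sketch of that delay schedule, assuming the commonly cited defaults of a 10 s initial delay doubling to a 300 s cap; the parameters are an assumption, not values read from this log:

```python
from itertools import islice

def backoff_delays(base: float = 10.0, cap: float = 300.0):
    """Yield the delay (seconds) before each successive pull retry.

    Hypothetical sketch of image-pull back-off; the 10 s base and 300 s
    cap are assumed defaults, not figures taken from the log above.
    """
    delay = base
    while True:
        yield min(delay, cap)
        delay *= 2  # exponential growth until the cap

print(list(islice(backoff_delays(), 6)))  # [10.0, 20.0, 40.0, 80.0, 160.0, 300.0]
```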
Feb 16 11:09:59 crc kubenswrapper[4949]: I0216 11:09:59.559935 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-b547dd45d-7wc59" event={"ID":"03df7621-f424-4d6f-abd9-e56c2d3f8da6","Type":"ContainerStarted","Data":"68897a897f3c6e6c55e8238b384de16b4de146fffef9830dcbc28fc869081380"} Feb 16 11:09:59 crc kubenswrapper[4949]: I0216 11:09:59.562390 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-b547dd45d-7wc59" event={"ID":"03df7621-f424-4d6f-abd9-e56c2d3f8da6","Type":"ContainerStarted","Data":"10baa77bfebeb8a1300bbec04110b3657bcbe8d299d97e3d524ea2335bde02db"} Feb 16 11:09:59 crc kubenswrapper[4949]: I0216 11:09:59.562511 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-b547dd45d-7wc59" Feb 16 11:09:59 crc kubenswrapper[4949]: I0216 11:09:59.560076 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-b547dd45d-7wc59" podUID="03df7621-f424-4d6f-abd9-e56c2d3f8da6" containerName="controller-manager" containerID="cri-o://68897a897f3c6e6c55e8238b384de16b4de146fffef9830dcbc28fc869081380" gracePeriod=30 Feb 16 11:09:59 crc kubenswrapper[4949]: I0216 11:09:59.566416 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-846b55b649-9glbl" podUID="50648dae-4718-4e02-96f8-ccf78affa386" containerName="route-controller-manager" containerID="cri-o://903ddf2a53fd9e99020d879108847fc8c5b031c19e39dbf5f9dbbe66699fdc6c" gracePeriod=30 Feb 16 11:09:59 crc kubenswrapper[4949]: I0216 11:09:59.566726 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-846b55b649-9glbl" event={"ID":"50648dae-4718-4e02-96f8-ccf78affa386","Type":"ContainerStarted","Data":"903ddf2a53fd9e99020d879108847fc8c5b031c19e39dbf5f9dbbe66699fdc6c"} Feb 16 11:09:59 crc kubenswrapper[4949]: I0216 11:09:59.566848 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-846b55b649-9glbl" event={"ID":"50648dae-4718-4e02-96f8-ccf78affa386","Type":"ContainerStarted","Data":"41c7d8a0b1cda06136324e2e10ae02cc4acc46dccc6cf94dacb183eb4fd17b85"} Feb 16 11:09:59 crc kubenswrapper[4949]: I0216 11:09:59.568270 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-846b55b649-9glbl" Feb 16 11:09:59 crc kubenswrapper[4949]: I0216 11:09:59.573315 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-b547dd45d-7wc59" Feb 16 11:09:59 crc kubenswrapper[4949]: I0216 11:09:59.579631 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-846b55b649-9glbl" Feb 16 11:09:59 crc kubenswrapper[4949]: I0216 11:09:59.582565 4949 generic.go:334] "Generic (PLEG): container finished" podID="c2716c39-f511-47e9-a400-94cb1cd5ba42" containerID="7e593655bc1e5915fe68d9d855cac645901bc32622c92087c8c32a206588f680" exitCode=0 Feb 16 11:09:59 crc kubenswrapper[4949]: I0216 11:09:59.582693 4949 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2jvc5" event={"ID":"c2716c39-f511-47e9-a400-94cb1cd5ba42","Type":"ContainerDied","Data":"7e593655bc1e5915fe68d9d855cac645901bc32622c92087c8c32a206588f680"} Feb 16 11:09:59 crc kubenswrapper[4949]: I0216 11:09:59.586400 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-6v4x7" event={"ID":"965b4f20-8786-4c47-8721-c348942551d6","Type":"ContainerStarted","Data":"36e9cd3230a3f39be98fdb27237deff665836c0286b5531c6c070633098f0c59"} Feb 16 11:09:59 crc kubenswrapper[4949]: I0216 11:09:59.586446 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-6v4x7" event={"ID":"965b4f20-8786-4c47-8721-c348942551d6","Type":"ContainerStarted","Data":"16e0023f48f6dbcde4555012261baad1e399170ca9a344acc6fe3a8f1b49ecda"} Feb 16 11:09:59 crc kubenswrapper[4949]: I0216 11:09:59.586998 4949 patch_prober.go:28] interesting pod/downloads-7954f5f757-kwq47 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Feb 16 11:09:59 crc kubenswrapper[4949]: I0216 11:09:59.587063 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-kwq47" podUID="89406f96-f3ec-4323-bb6a-c42175151f9d" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Feb 16 11:09:59 crc kubenswrapper[4949]: I0216 11:09:59.633479 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-b547dd45d-7wc59" podStartSLOduration=24.633444363 podStartE2EDuration="24.633444363s" podCreationTimestamp="2026-02-16 11:09:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:09:59.583980103 +0000 UTC m=+189.213314268" watchObservedRunningTime="2026-02-16 11:09:59.633444363 +0000 UTC m=+189.262778528" Feb 16 11:09:59 crc kubenswrapper[4949]: I0216 11:09:59.660545 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-846b55b649-9glbl" podStartSLOduration=24.660510941 podStartE2EDuration="24.660510941s" podCreationTimestamp="2026-02-16 11:09:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:09:59.651515079 +0000 UTC m=+189.280849244" watchObservedRunningTime="2026-02-16 11:09:59.660510941 +0000 UTC m=+189.289845106" Feb 16 11:09:59 crc kubenswrapper[4949]: I0216 11:09:59.681808 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-6v4x7" podStartSLOduration=167.681781868 podStartE2EDuration="2m47.681781868s" podCreationTimestamp="2026-02-16 11:07:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:09:59.677397571 +0000 UTC m=+189.306731756" watchObservedRunningTime="2026-02-16 11:09:59.681781868 +0000 UTC m=+189.311116033" Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.297887 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.597461 4949 generic.go:334] "Generic (PLEG): container finished" podID="50648dae-4718-4e02-96f8-ccf78affa386" containerID="903ddf2a53fd9e99020d879108847fc8c5b031c19e39dbf5f9dbbe66699fdc6c" exitCode=0 Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.597529 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-846b55b649-9glbl" event={"ID":"50648dae-4718-4e02-96f8-ccf78affa386","Type":"ContainerDied","Data":"903ddf2a53fd9e99020d879108847fc8c5b031c19e39dbf5f9dbbe66699fdc6c"} Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.601501 4949 generic.go:334] "Generic (PLEG): container finished" podID="03df7621-f424-4d6f-abd9-e56c2d3f8da6" containerID="68897a897f3c6e6c55e8238b384de16b4de146fffef9830dcbc28fc869081380" exitCode=0 Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.601569 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-b547dd45d-7wc59" event={"ID":"03df7621-f424-4d6f-abd9-e56c2d3f8da6","Type":"ContainerDied","Data":"68897a897f3c6e6c55e8238b384de16b4de146fffef9830dcbc28fc869081380"} Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.637995 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-b547dd45d-7wc59" Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.695305 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-7b974587b6-bqspv"] Feb 16 11:10:00 crc kubenswrapper[4949]: E0216 11:10:00.695643 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03df7621-f424-4d6f-abd9-e56c2d3f8da6" containerName="controller-manager" Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.695663 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="03df7621-f424-4d6f-abd9-e56c2d3f8da6" containerName="controller-manager" Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.695756 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="03df7621-f424-4d6f-abd9-e56c2d3f8da6" containerName="controller-manager" Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.696569 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vtpbl\" (UniqueName: \"kubernetes.io/projected/03df7621-f424-4d6f-abd9-e56c2d3f8da6-kube-api-access-vtpbl\") pod \"03df7621-f424-4d6f-abd9-e56c2d3f8da6\" (UID: \"03df7621-f424-4d6f-abd9-e56c2d3f8da6\") " Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.696643 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/03df7621-f424-4d6f-abd9-e56c2d3f8da6-client-ca\") pod \"03df7621-f424-4d6f-abd9-e56c2d3f8da6\" (UID: \"03df7621-f424-4d6f-abd9-e56c2d3f8da6\") " Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.696736 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/03df7621-f424-4d6f-abd9-e56c2d3f8da6-proxy-ca-bundles\") pod \"03df7621-f424-4d6f-abd9-e56c2d3f8da6\" (UID: \"03df7621-f424-4d6f-abd9-e56c2d3f8da6\") " Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.696764 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/03df7621-f424-4d6f-abd9-e56c2d3f8da6-serving-cert\") pod \"03df7621-f424-4d6f-abd9-e56c2d3f8da6\" (UID: \"03df7621-f424-4d6f-abd9-e56c2d3f8da6\") " Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.696814 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/03df7621-f424-4d6f-abd9-e56c2d3f8da6-config\") pod \"03df7621-f424-4d6f-abd9-e56c2d3f8da6\" (UID: \"03df7621-f424-4d6f-abd9-e56c2d3f8da6\") " Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.696871 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-7b974587b6-bqspv"] Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.697696 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/03df7621-f424-4d6f-abd9-e56c2d3f8da6-client-ca" (OuterVolumeSpecName: "client-ca") pod "03df7621-f424-4d6f-abd9-e56c2d3f8da6" (UID: "03df7621-f424-4d6f-abd9-e56c2d3f8da6"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.697724 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/03df7621-f424-4d6f-abd9-e56c2d3f8da6-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "03df7621-f424-4d6f-abd9-e56c2d3f8da6" (UID: "03df7621-f424-4d6f-abd9-e56c2d3f8da6"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.697795 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7b974587b6-bqspv" Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.701078 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/03df7621-f424-4d6f-abd9-e56c2d3f8da6-config" (OuterVolumeSpecName: "config") pod "03df7621-f424-4d6f-abd9-e56c2d3f8da6" (UID: "03df7621-f424-4d6f-abd9-e56c2d3f8da6"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.707525 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/03df7621-f424-4d6f-abd9-e56c2d3f8da6-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "03df7621-f424-4d6f-abd9-e56c2d3f8da6" (UID: "03df7621-f424-4d6f-abd9-e56c2d3f8da6"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.707625 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/03df7621-f424-4d6f-abd9-e56c2d3f8da6-kube-api-access-vtpbl" (OuterVolumeSpecName: "kube-api-access-vtpbl") pod "03df7621-f424-4d6f-abd9-e56c2d3f8da6" (UID: "03df7621-f424-4d6f-abd9-e56c2d3f8da6"). InnerVolumeSpecName "kube-api-access-vtpbl". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.745020 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-846b55b649-9glbl" Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.798643 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/50648dae-4718-4e02-96f8-ccf78affa386-client-ca\") pod \"50648dae-4718-4e02-96f8-ccf78affa386\" (UID: \"50648dae-4718-4e02-96f8-ccf78affa386\") " Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.798780 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/50648dae-4718-4e02-96f8-ccf78affa386-serving-cert\") pod \"50648dae-4718-4e02-96f8-ccf78affa386\" (UID: \"50648dae-4718-4e02-96f8-ccf78affa386\") " Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.798820 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50648dae-4718-4e02-96f8-ccf78affa386-config\") pod \"50648dae-4718-4e02-96f8-ccf78affa386\" (UID: \"50648dae-4718-4e02-96f8-ccf78affa386\") " Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.798948 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g279\" (UniqueName: \"kubernetes.io/projected/50648dae-4718-4e02-96f8-ccf78affa386-kube-api-access-6g279\") pod \"50648dae-4718-4e02-96f8-ccf78affa386\" (UID: \"50648dae-4718-4e02-96f8-ccf78affa386\") " Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.799210 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7b1cb921-0b7d-496e-98a8-282de0057521-serving-cert\") pod \"controller-manager-7b974587b6-bqspv\" (UID: \"7b1cb921-0b7d-496e-98a8-282de0057521\") " pod="openshift-controller-manager/controller-manager-7b974587b6-bqspv" Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.799277 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wc2mt\" (UniqueName: \"kubernetes.io/projected/7b1cb921-0b7d-496e-98a8-282de0057521-kube-api-access-wc2mt\") pod \"controller-manager-7b974587b6-bqspv\" (UID: \"7b1cb921-0b7d-496e-98a8-282de0057521\") " pod="openshift-controller-manager/controller-manager-7b974587b6-bqspv" Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.799335 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7b1cb921-0b7d-496e-98a8-282de0057521-client-ca\") pod \"controller-manager-7b974587b6-bqspv\" (UID: \"7b1cb921-0b7d-496e-98a8-282de0057521\") " pod="openshift-controller-manager/controller-manager-7b974587b6-bqspv" Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.799374 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7b1cb921-0b7d-496e-98a8-282de0057521-config\") pod \"controller-manager-7b974587b6-bqspv\" (UID: \"7b1cb921-0b7d-496e-98a8-282de0057521\") " pod="openshift-controller-manager/controller-manager-7b974587b6-bqspv" Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.799437 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7b1cb921-0b7d-496e-98a8-282de0057521-proxy-ca-bundles\") pod 
\"controller-manager-7b974587b6-bqspv\" (UID: \"7b1cb921-0b7d-496e-98a8-282de0057521\") " pod="openshift-controller-manager/controller-manager-7b974587b6-bqspv" Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.799491 4949 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/03df7621-f424-4d6f-abd9-e56c2d3f8da6-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.799506 4949 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/03df7621-f424-4d6f-abd9-e56c2d3f8da6-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.799519 4949 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/03df7621-f424-4d6f-abd9-e56c2d3f8da6-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.799533 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vtpbl\" (UniqueName: \"kubernetes.io/projected/03df7621-f424-4d6f-abd9-e56c2d3f8da6-kube-api-access-vtpbl\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.799546 4949 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/03df7621-f424-4d6f-abd9-e56c2d3f8da6-client-ca\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.800379 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/50648dae-4718-4e02-96f8-ccf78affa386-client-ca" (OuterVolumeSpecName: "client-ca") pod "50648dae-4718-4e02-96f8-ccf78affa386" (UID: "50648dae-4718-4e02-96f8-ccf78affa386"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.801996 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/50648dae-4718-4e02-96f8-ccf78affa386-config" (OuterVolumeSpecName: "config") pod "50648dae-4718-4e02-96f8-ccf78affa386" (UID: "50648dae-4718-4e02-96f8-ccf78affa386"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.803129 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/50648dae-4718-4e02-96f8-ccf78affa386-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "50648dae-4718-4e02-96f8-ccf78affa386" (UID: "50648dae-4718-4e02-96f8-ccf78affa386"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.806496 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/50648dae-4718-4e02-96f8-ccf78affa386-kube-api-access-6g279" (OuterVolumeSpecName: "kube-api-access-6g279") pod "50648dae-4718-4e02-96f8-ccf78affa386" (UID: "50648dae-4718-4e02-96f8-ccf78affa386"). InnerVolumeSpecName "kube-api-access-6g279". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.900626 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7b1cb921-0b7d-496e-98a8-282de0057521-proxy-ca-bundles\") pod \"controller-manager-7b974587b6-bqspv\" (UID: \"7b1cb921-0b7d-496e-98a8-282de0057521\") " pod="openshift-controller-manager/controller-manager-7b974587b6-bqspv" Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.900713 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7b1cb921-0b7d-496e-98a8-282de0057521-serving-cert\") pod \"controller-manager-7b974587b6-bqspv\" (UID: \"7b1cb921-0b7d-496e-98a8-282de0057521\") " pod="openshift-controller-manager/controller-manager-7b974587b6-bqspv" Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.900761 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wc2mt\" (UniqueName: \"kubernetes.io/projected/7b1cb921-0b7d-496e-98a8-282de0057521-kube-api-access-wc2mt\") pod \"controller-manager-7b974587b6-bqspv\" (UID: \"7b1cb921-0b7d-496e-98a8-282de0057521\") " pod="openshift-controller-manager/controller-manager-7b974587b6-bqspv" Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.900788 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7b1cb921-0b7d-496e-98a8-282de0057521-client-ca\") pod \"controller-manager-7b974587b6-bqspv\" (UID: \"7b1cb921-0b7d-496e-98a8-282de0057521\") " pod="openshift-controller-manager/controller-manager-7b974587b6-bqspv" Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.900823 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7b1cb921-0b7d-496e-98a8-282de0057521-config\") pod \"controller-manager-7b974587b6-bqspv\" (UID: \"7b1cb921-0b7d-496e-98a8-282de0057521\") " pod="openshift-controller-manager/controller-manager-7b974587b6-bqspv" Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.900889 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g279\" (UniqueName: \"kubernetes.io/projected/50648dae-4718-4e02-96f8-ccf78affa386-kube-api-access-6g279\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.900909 4949 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/50648dae-4718-4e02-96f8-ccf78affa386-client-ca\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.900921 4949 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/50648dae-4718-4e02-96f8-ccf78affa386-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.900933 4949 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50648dae-4718-4e02-96f8-ccf78affa386-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.902474 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7b1cb921-0b7d-496e-98a8-282de0057521-client-ca\") pod \"controller-manager-7b974587b6-bqspv\" (UID: \"7b1cb921-0b7d-496e-98a8-282de0057521\") " 
pod="openshift-controller-manager/controller-manager-7b974587b6-bqspv" Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.902681 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7b1cb921-0b7d-496e-98a8-282de0057521-config\") pod \"controller-manager-7b974587b6-bqspv\" (UID: \"7b1cb921-0b7d-496e-98a8-282de0057521\") " pod="openshift-controller-manager/controller-manager-7b974587b6-bqspv" Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.903167 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7b1cb921-0b7d-496e-98a8-282de0057521-proxy-ca-bundles\") pod \"controller-manager-7b974587b6-bqspv\" (UID: \"7b1cb921-0b7d-496e-98a8-282de0057521\") " pod="openshift-controller-manager/controller-manager-7b974587b6-bqspv" Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.905733 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7b1cb921-0b7d-496e-98a8-282de0057521-serving-cert\") pod \"controller-manager-7b974587b6-bqspv\" (UID: \"7b1cb921-0b7d-496e-98a8-282de0057521\") " pod="openshift-controller-manager/controller-manager-7b974587b6-bqspv" Feb 16 11:10:00 crc kubenswrapper[4949]: I0216 11:10:00.921496 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wc2mt\" (UniqueName: \"kubernetes.io/projected/7b1cb921-0b7d-496e-98a8-282de0057521-kube-api-access-wc2mt\") pod \"controller-manager-7b974587b6-bqspv\" (UID: \"7b1cb921-0b7d-496e-98a8-282de0057521\") " pod="openshift-controller-manager/controller-manager-7b974587b6-bqspv" Feb 16 11:10:01 crc kubenswrapper[4949]: I0216 11:10:01.057472 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-7b974587b6-bqspv" Feb 16 11:10:01 crc kubenswrapper[4949]: I0216 11:10:01.414157 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-7b974587b6-bqspv"] Feb 16 11:10:01 crc kubenswrapper[4949]: W0216 11:10:01.421992 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7b1cb921_0b7d_496e_98a8_282de0057521.slice/crio-9e4f7e7af9db21749e14e766b28f4833db5cfa7656c1db2102a3e735756cfcd2 WatchSource:0}: Error finding container 9e4f7e7af9db21749e14e766b28f4833db5cfa7656c1db2102a3e735756cfcd2: Status 404 returned error can't find the container with id 9e4f7e7af9db21749e14e766b28f4833db5cfa7656c1db2102a3e735756cfcd2 Feb 16 11:10:01 crc kubenswrapper[4949]: I0216 11:10:01.617983 4949 patch_prober.go:28] interesting pod/downloads-7954f5f757-kwq47 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Feb 16 11:10:01 crc kubenswrapper[4949]: I0216 11:10:01.618055 4949 patch_prober.go:28] interesting pod/downloads-7954f5f757-kwq47 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Feb 16 11:10:01 crc kubenswrapper[4949]: I0216 11:10:01.618152 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-kwq47" podUID="89406f96-f3ec-4323-bb6a-c42175151f9d" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Feb 16 11:10:01 crc kubenswrapper[4949]: I0216 11:10:01.618075 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-kwq47" podUID="89406f96-f3ec-4323-bb6a-c42175151f9d" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Feb 16 11:10:01 crc kubenswrapper[4949]: I0216 11:10:01.623918 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7b974587b6-bqspv" event={"ID":"7b1cb921-0b7d-496e-98a8-282de0057521","Type":"ContainerStarted","Data":"9e4f7e7af9db21749e14e766b28f4833db5cfa7656c1db2102a3e735756cfcd2"} Feb 16 11:10:01 crc kubenswrapper[4949]: I0216 11:10:01.625946 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-846b55b649-9glbl" event={"ID":"50648dae-4718-4e02-96f8-ccf78affa386","Type":"ContainerDied","Data":"41c7d8a0b1cda06136324e2e10ae02cc4acc46dccc6cf94dacb183eb4fd17b85"} Feb 16 11:10:01 crc kubenswrapper[4949]: I0216 11:10:01.626076 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-846b55b649-9glbl" Feb 16 11:10:01 crc kubenswrapper[4949]: I0216 11:10:01.626129 4949 scope.go:117] "RemoveContainer" containerID="903ddf2a53fd9e99020d879108847fc8c5b031c19e39dbf5f9dbbe66699fdc6c" Feb 16 11:10:01 crc kubenswrapper[4949]: I0216 11:10:01.630616 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2jvc5" event={"ID":"c2716c39-f511-47e9-a400-94cb1cd5ba42","Type":"ContainerStarted","Data":"cb42da8c1eb59adbd159486be5a1eaf6b2f00c071c75f05e63da4f0401ba75bf"} Feb 16 11:10:01 crc kubenswrapper[4949]: I0216 11:10:01.633009 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-b547dd45d-7wc59" event={"ID":"03df7621-f424-4d6f-abd9-e56c2d3f8da6","Type":"ContainerDied","Data":"10baa77bfebeb8a1300bbec04110b3657bcbe8d299d97e3d524ea2335bde02db"} Feb 16 11:10:01 crc kubenswrapper[4949]: I0216 11:10:01.633115 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-b547dd45d-7wc59" Feb 16 11:10:01 crc kubenswrapper[4949]: I0216 11:10:01.648334 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-846b55b649-9glbl"] Feb 16 11:10:01 crc kubenswrapper[4949]: I0216 11:10:01.652113 4949 scope.go:117] "RemoveContainer" containerID="68897a897f3c6e6c55e8238b384de16b4de146fffef9830dcbc28fc869081380" Feb 16 11:10:01 crc kubenswrapper[4949]: I0216 11:10:01.660366 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-846b55b649-9glbl"] Feb 16 11:10:01 crc kubenswrapper[4949]: I0216 11:10:01.666974 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-b547dd45d-7wc59"] Feb 16 11:10:01 crc kubenswrapper[4949]: I0216 11:10:01.667147 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-b547dd45d-7wc59"] Feb 16 11:10:02 crc kubenswrapper[4949]: I0216 11:10:02.642271 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7b974587b6-bqspv" event={"ID":"7b1cb921-0b7d-496e-98a8-282de0057521","Type":"ContainerStarted","Data":"ad6dee9a812af972bc73824beec83a0014c86f3337fdb7c54eb5117f5813ecb7"} Feb 16 11:10:02 crc kubenswrapper[4949]: I0216 11:10:02.685914 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-2jvc5" podStartSLOduration=4.408048949 podStartE2EDuration="40.685880046s" podCreationTimestamp="2026-02-16 11:09:22 +0000 UTC" firstStartedPulling="2026-02-16 11:09:24.904826013 +0000 UTC m=+154.534160178" lastFinishedPulling="2026-02-16 11:10:01.18265711 +0000 UTC m=+190.811991275" observedRunningTime="2026-02-16 11:10:02.661808141 +0000 UTC m=+192.291142306" watchObservedRunningTime="2026-02-16 11:10:02.685880046 +0000 UTC m=+192.315214211" Feb 16 11:10:02 crc kubenswrapper[4949]: I0216 11:10:02.688429 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-7b974587b6-bqspv" podStartSLOduration=7.688410955 podStartE2EDuration="7.688410955s" podCreationTimestamp="2026-02-16 11:09:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 
11:10:02.687628951 +0000 UTC m=+192.316963116" watchObservedRunningTime="2026-02-16 11:10:02.688410955 +0000 UTC m=+192.317745120" Feb 16 11:10:02 crc kubenswrapper[4949]: I0216 11:10:02.981529 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Feb 16 11:10:02 crc kubenswrapper[4949]: E0216 11:10:02.981787 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="50648dae-4718-4e02-96f8-ccf78affa386" containerName="route-controller-manager" Feb 16 11:10:02 crc kubenswrapper[4949]: I0216 11:10:02.981801 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="50648dae-4718-4e02-96f8-ccf78affa386" containerName="route-controller-manager" Feb 16 11:10:02 crc kubenswrapper[4949]: I0216 11:10:02.981900 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="50648dae-4718-4e02-96f8-ccf78affa386" containerName="route-controller-manager" Feb 16 11:10:02 crc kubenswrapper[4949]: I0216 11:10:02.982567 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Feb 16 11:10:02 crc kubenswrapper[4949]: I0216 11:10:02.986340 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Feb 16 11:10:02 crc kubenswrapper[4949]: I0216 11:10:02.989374 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Feb 16 11:10:02 crc kubenswrapper[4949]: I0216 11:10:02.998752 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Feb 16 11:10:03 crc kubenswrapper[4949]: I0216 11:10:03.036861 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/587167f3-5988-405e-bdb0-17859f047250-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"587167f3-5988-405e-bdb0-17859f047250\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Feb 16 11:10:03 crc kubenswrapper[4949]: I0216 11:10:03.037027 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/587167f3-5988-405e-bdb0-17859f047250-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"587167f3-5988-405e-bdb0-17859f047250\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Feb 16 11:10:03 crc kubenswrapper[4949]: I0216 11:10:03.139018 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/587167f3-5988-405e-bdb0-17859f047250-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"587167f3-5988-405e-bdb0-17859f047250\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Feb 16 11:10:03 crc kubenswrapper[4949]: I0216 11:10:03.139118 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/587167f3-5988-405e-bdb0-17859f047250-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"587167f3-5988-405e-bdb0-17859f047250\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Feb 16 11:10:03 crc kubenswrapper[4949]: I0216 11:10:03.139183 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/587167f3-5988-405e-bdb0-17859f047250-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: 
\"587167f3-5988-405e-bdb0-17859f047250\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Feb 16 11:10:03 crc kubenswrapper[4949]: I0216 11:10:03.163919 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/587167f3-5988-405e-bdb0-17859f047250-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"587167f3-5988-405e-bdb0-17859f047250\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Feb 16 11:10:03 crc kubenswrapper[4949]: I0216 11:10:03.227633 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-68c644598f-thdf2"] Feb 16 11:10:03 crc kubenswrapper[4949]: I0216 11:10:03.228645 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-68c644598f-thdf2" Feb 16 11:10:03 crc kubenswrapper[4949]: I0216 11:10:03.236762 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Feb 16 11:10:03 crc kubenswrapper[4949]: I0216 11:10:03.237535 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Feb 16 11:10:03 crc kubenswrapper[4949]: I0216 11:10:03.237916 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Feb 16 11:10:03 crc kubenswrapper[4949]: I0216 11:10:03.238154 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Feb 16 11:10:03 crc kubenswrapper[4949]: I0216 11:10:03.238468 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Feb 16 11:10:03 crc kubenswrapper[4949]: I0216 11:10:03.238791 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Feb 16 11:10:03 crc kubenswrapper[4949]: I0216 11:10:03.259787 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="03df7621-f424-4d6f-abd9-e56c2d3f8da6" path="/var/lib/kubelet/pods/03df7621-f424-4d6f-abd9-e56c2d3f8da6/volumes" Feb 16 11:10:03 crc kubenswrapper[4949]: I0216 11:10:03.271708 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="50648dae-4718-4e02-96f8-ccf78affa386" path="/var/lib/kubelet/pods/50648dae-4718-4e02-96f8-ccf78affa386/volumes" Feb 16 11:10:03 crc kubenswrapper[4949]: I0216 11:10:03.272985 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-68c644598f-thdf2"] Feb 16 11:10:03 crc kubenswrapper[4949]: I0216 11:10:03.310066 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-2jvc5" Feb 16 11:10:03 crc kubenswrapper[4949]: I0216 11:10:03.310203 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-2jvc5" Feb 16 11:10:03 crc kubenswrapper[4949]: I0216 11:10:03.312053 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Feb 16 11:10:03 crc kubenswrapper[4949]: I0216 11:10:03.342284 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jr77v\" (UniqueName: \"kubernetes.io/projected/d0cc1aef-1574-4522-91f0-f0aa89b7e240-kube-api-access-jr77v\") pod \"route-controller-manager-68c644598f-thdf2\" (UID: \"d0cc1aef-1574-4522-91f0-f0aa89b7e240\") " pod="openshift-route-controller-manager/route-controller-manager-68c644598f-thdf2" Feb 16 11:10:03 crc kubenswrapper[4949]: I0216 11:10:03.342415 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d0cc1aef-1574-4522-91f0-f0aa89b7e240-serving-cert\") pod \"route-controller-manager-68c644598f-thdf2\" (UID: \"d0cc1aef-1574-4522-91f0-f0aa89b7e240\") " pod="openshift-route-controller-manager/route-controller-manager-68c644598f-thdf2" Feb 16 11:10:03 crc kubenswrapper[4949]: I0216 11:10:03.342453 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d0cc1aef-1574-4522-91f0-f0aa89b7e240-client-ca\") pod \"route-controller-manager-68c644598f-thdf2\" (UID: \"d0cc1aef-1574-4522-91f0-f0aa89b7e240\") " pod="openshift-route-controller-manager/route-controller-manager-68c644598f-thdf2" Feb 16 11:10:03 crc kubenswrapper[4949]: I0216 11:10:03.342481 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d0cc1aef-1574-4522-91f0-f0aa89b7e240-config\") pod \"route-controller-manager-68c644598f-thdf2\" (UID: \"d0cc1aef-1574-4522-91f0-f0aa89b7e240\") " pod="openshift-route-controller-manager/route-controller-manager-68c644598f-thdf2" Feb 16 11:10:03 crc kubenswrapper[4949]: I0216 11:10:03.443141 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jr77v\" (UniqueName: \"kubernetes.io/projected/d0cc1aef-1574-4522-91f0-f0aa89b7e240-kube-api-access-jr77v\") pod \"route-controller-manager-68c644598f-thdf2\" (UID: \"d0cc1aef-1574-4522-91f0-f0aa89b7e240\") " pod="openshift-route-controller-manager/route-controller-manager-68c644598f-thdf2" Feb 16 11:10:03 crc kubenswrapper[4949]: I0216 11:10:03.443252 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d0cc1aef-1574-4522-91f0-f0aa89b7e240-serving-cert\") pod \"route-controller-manager-68c644598f-thdf2\" (UID: \"d0cc1aef-1574-4522-91f0-f0aa89b7e240\") " pod="openshift-route-controller-manager/route-controller-manager-68c644598f-thdf2" Feb 16 11:10:03 crc kubenswrapper[4949]: I0216 11:10:03.443280 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d0cc1aef-1574-4522-91f0-f0aa89b7e240-client-ca\") pod \"route-controller-manager-68c644598f-thdf2\" (UID: \"d0cc1aef-1574-4522-91f0-f0aa89b7e240\") " pod="openshift-route-controller-manager/route-controller-manager-68c644598f-thdf2" Feb 16 11:10:03 crc kubenswrapper[4949]: I0216 11:10:03.443304 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d0cc1aef-1574-4522-91f0-f0aa89b7e240-config\") pod \"route-controller-manager-68c644598f-thdf2\" (UID: \"d0cc1aef-1574-4522-91f0-f0aa89b7e240\") " 
pod="openshift-route-controller-manager/route-controller-manager-68c644598f-thdf2" Feb 16 11:10:03 crc kubenswrapper[4949]: I0216 11:10:03.444531 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d0cc1aef-1574-4522-91f0-f0aa89b7e240-client-ca\") pod \"route-controller-manager-68c644598f-thdf2\" (UID: \"d0cc1aef-1574-4522-91f0-f0aa89b7e240\") " pod="openshift-route-controller-manager/route-controller-manager-68c644598f-thdf2" Feb 16 11:10:03 crc kubenswrapper[4949]: I0216 11:10:03.444736 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d0cc1aef-1574-4522-91f0-f0aa89b7e240-config\") pod \"route-controller-manager-68c644598f-thdf2\" (UID: \"d0cc1aef-1574-4522-91f0-f0aa89b7e240\") " pod="openshift-route-controller-manager/route-controller-manager-68c644598f-thdf2" Feb 16 11:10:03 crc kubenswrapper[4949]: I0216 11:10:03.453565 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d0cc1aef-1574-4522-91f0-f0aa89b7e240-serving-cert\") pod \"route-controller-manager-68c644598f-thdf2\" (UID: \"d0cc1aef-1574-4522-91f0-f0aa89b7e240\") " pod="openshift-route-controller-manager/route-controller-manager-68c644598f-thdf2" Feb 16 11:10:03 crc kubenswrapper[4949]: I0216 11:10:03.465500 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jr77v\" (UniqueName: \"kubernetes.io/projected/d0cc1aef-1574-4522-91f0-f0aa89b7e240-kube-api-access-jr77v\") pod \"route-controller-manager-68c644598f-thdf2\" (UID: \"d0cc1aef-1574-4522-91f0-f0aa89b7e240\") " pod="openshift-route-controller-manager/route-controller-manager-68c644598f-thdf2" Feb 16 11:10:03 crc kubenswrapper[4949]: I0216 11:10:03.577584 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-68c644598f-thdf2" Feb 16 11:10:03 crc kubenswrapper[4949]: I0216 11:10:03.662673 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-7b974587b6-bqspv" Feb 16 11:10:03 crc kubenswrapper[4949]: I0216 11:10:03.666744 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-7b974587b6-bqspv" Feb 16 11:10:03 crc kubenswrapper[4949]: I0216 11:10:03.685091 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Feb 16 11:10:03 crc kubenswrapper[4949]: I0216 11:10:03.967417 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-68c644598f-thdf2"] Feb 16 11:10:03 crc kubenswrapper[4949]: W0216 11:10:03.984397 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd0cc1aef_1574_4522_91f0_f0aa89b7e240.slice/crio-09f6fa5c83c199b27753a1fe8f4c74d530571faf86e7fef304db977caaa62aa0 WatchSource:0}: Error finding container 09f6fa5c83c199b27753a1fe8f4c74d530571faf86e7fef304db977caaa62aa0: Status 404 returned error can't find the container with id 09f6fa5c83c199b27753a1fe8f4c74d530571faf86e7fef304db977caaa62aa0 Feb 16 11:10:04 crc kubenswrapper[4949]: I0216 11:10:04.550408 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 16 11:10:04 crc kubenswrapper[4949]: I0216 11:10:04.550933 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 16 11:10:04 crc kubenswrapper[4949]: I0216 11:10:04.674453 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-68c644598f-thdf2" event={"ID":"d0cc1aef-1574-4522-91f0-f0aa89b7e240","Type":"ContainerStarted","Data":"e81973aab48a5469c66136db88bc3bfaa53354a8b76bfc368e65e933e3d0d718"} Feb 16 11:10:04 crc kubenswrapper[4949]: I0216 11:10:04.674520 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-68c644598f-thdf2" event={"ID":"d0cc1aef-1574-4522-91f0-f0aa89b7e240","Type":"ContainerStarted","Data":"09f6fa5c83c199b27753a1fe8f4c74d530571faf86e7fef304db977caaa62aa0"} Feb 16 11:10:04 crc kubenswrapper[4949]: I0216 11:10:04.674732 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-68c644598f-thdf2" Feb 16 11:10:04 crc kubenswrapper[4949]: I0216 11:10:04.679147 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"587167f3-5988-405e-bdb0-17859f047250","Type":"ContainerStarted","Data":"43e2821c9322f36dff860d92dbbfbe08b56be8b7c1cfed93eb78717a999b9c5b"} Feb 16 11:10:04 crc kubenswrapper[4949]: I0216 11:10:04.679205 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"587167f3-5988-405e-bdb0-17859f047250","Type":"ContainerStarted","Data":"0aa294eae37b877c8a508269280375d00f25eb7a6185b6e7afd05627757a64cd"} Feb 16 11:10:04 crc kubenswrapper[4949]: I0216 11:10:04.722330 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-68c644598f-thdf2" podStartSLOduration=9.722299952 podStartE2EDuration="9.722299952s" podCreationTimestamp="2026-02-16 11:09:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:10:04.703313807 +0000 UTC m=+194.332647972" watchObservedRunningTime="2026-02-16 11:10:04.722299952 +0000 UTC m=+194.351634117" Feb 16 11:10:04 crc kubenswrapper[4949]: I0216 11:10:04.725158 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-9-crc" podStartSLOduration=2.725145021 podStartE2EDuration="2.725145021s" podCreationTimestamp="2026-02-16 11:10:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:10:04.720219577 +0000 UTC m=+194.349553742" watchObservedRunningTime="2026-02-16 11:10:04.725145021 +0000 UTC m=+194.354479196" Feb 16 11:10:04 crc kubenswrapper[4949]: I0216 11:10:04.789728 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-68c644598f-thdf2" Feb 16 11:10:04 crc kubenswrapper[4949]: I0216 11:10:04.795270 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-2jvc5" podUID="c2716c39-f511-47e9-a400-94cb1cd5ba42" containerName="registry-server" probeResult="failure" output=< Feb 16 11:10:04 crc kubenswrapper[4949]: timeout: failed to connect service ":50051" within 1s Feb 16 11:10:04 crc kubenswrapper[4949]: > Feb 16 11:10:05 crc kubenswrapper[4949]: I0216 11:10:05.686998 4949 generic.go:334] "Generic (PLEG): container finished" podID="587167f3-5988-405e-bdb0-17859f047250" containerID="43e2821c9322f36dff860d92dbbfbe08b56be8b7c1cfed93eb78717a999b9c5b" exitCode=0 Feb 16 11:10:05 crc kubenswrapper[4949]: I0216 11:10:05.687264 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"587167f3-5988-405e-bdb0-17859f047250","Type":"ContainerDied","Data":"43e2821c9322f36dff860d92dbbfbe08b56be8b7c1cfed93eb78717a999b9c5b"} Feb 16 11:10:07 crc kubenswrapper[4949]: I0216 11:10:07.021670 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Feb 16 11:10:07 crc kubenswrapper[4949]: I0216 11:10:07.130279 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/587167f3-5988-405e-bdb0-17859f047250-kube-api-access\") pod \"587167f3-5988-405e-bdb0-17859f047250\" (UID: \"587167f3-5988-405e-bdb0-17859f047250\") " Feb 16 11:10:07 crc kubenswrapper[4949]: I0216 11:10:07.130356 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/587167f3-5988-405e-bdb0-17859f047250-kubelet-dir\") pod \"587167f3-5988-405e-bdb0-17859f047250\" (UID: \"587167f3-5988-405e-bdb0-17859f047250\") " Feb 16 11:10:07 crc kubenswrapper[4949]: I0216 11:10:07.130565 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/587167f3-5988-405e-bdb0-17859f047250-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "587167f3-5988-405e-bdb0-17859f047250" (UID: "587167f3-5988-405e-bdb0-17859f047250"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 16 11:10:07 crc kubenswrapper[4949]: I0216 11:10:07.137275 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/587167f3-5988-405e-bdb0-17859f047250-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "587167f3-5988-405e-bdb0-17859f047250" (UID: "587167f3-5988-405e-bdb0-17859f047250"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:10:07 crc kubenswrapper[4949]: I0216 11:10:07.231889 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/587167f3-5988-405e-bdb0-17859f047250-kube-api-access\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:07 crc kubenswrapper[4949]: I0216 11:10:07.231936 4949 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/587167f3-5988-405e-bdb0-17859f047250-kubelet-dir\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:07 crc kubenswrapper[4949]: I0216 11:10:07.702327 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"587167f3-5988-405e-bdb0-17859f047250","Type":"ContainerDied","Data":"0aa294eae37b877c8a508269280375d00f25eb7a6185b6e7afd05627757a64cd"} Feb 16 11:10:07 crc kubenswrapper[4949]: I0216 11:10:07.702378 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0aa294eae37b877c8a508269280375d00f25eb7a6185b6e7afd05627757a64cd" Feb 16 11:10:07 crc kubenswrapper[4949]: I0216 11:10:07.702499 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Feb 16 11:10:08 crc kubenswrapper[4949]: I0216 11:10:08.978964 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Feb 16 11:10:08 crc kubenswrapper[4949]: E0216 11:10:08.979306 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="587167f3-5988-405e-bdb0-17859f047250" containerName="pruner" Feb 16 11:10:08 crc kubenswrapper[4949]: I0216 11:10:08.979325 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="587167f3-5988-405e-bdb0-17859f047250" containerName="pruner" Feb 16 11:10:08 crc kubenswrapper[4949]: I0216 11:10:08.979478 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="587167f3-5988-405e-bdb0-17859f047250" containerName="pruner" Feb 16 11:10:08 crc kubenswrapper[4949]: I0216 11:10:08.980026 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Feb 16 11:10:08 crc kubenswrapper[4949]: I0216 11:10:08.983715 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Feb 16 11:10:08 crc kubenswrapper[4949]: I0216 11:10:08.986263 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Feb 16 11:10:08 crc kubenswrapper[4949]: I0216 11:10:08.991197 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Feb 16 11:10:09 crc kubenswrapper[4949]: I0216 11:10:09.057719 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/594902df-fdc6-4c21-9c22-8b4fd690d408-kube-api-access\") pod \"installer-9-crc\" (UID: \"594902df-fdc6-4c21-9c22-8b4fd690d408\") " pod="openshift-kube-apiserver/installer-9-crc" Feb 16 11:10:09 crc kubenswrapper[4949]: I0216 11:10:09.057800 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/594902df-fdc6-4c21-9c22-8b4fd690d408-kubelet-dir\") pod \"installer-9-crc\" (UID: \"594902df-fdc6-4c21-9c22-8b4fd690d408\") " pod="openshift-kube-apiserver/installer-9-crc" Feb 16 11:10:09 crc kubenswrapper[4949]: I0216 11:10:09.057871 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/594902df-fdc6-4c21-9c22-8b4fd690d408-var-lock\") pod \"installer-9-crc\" (UID: \"594902df-fdc6-4c21-9c22-8b4fd690d408\") " pod="openshift-kube-apiserver/installer-9-crc" Feb 16 11:10:09 crc kubenswrapper[4949]: I0216 11:10:09.159419 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/594902df-fdc6-4c21-9c22-8b4fd690d408-kube-api-access\") pod \"installer-9-crc\" (UID: \"594902df-fdc6-4c21-9c22-8b4fd690d408\") " pod="openshift-kube-apiserver/installer-9-crc" Feb 16 11:10:09 crc kubenswrapper[4949]: I0216 11:10:09.159536 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/594902df-fdc6-4c21-9c22-8b4fd690d408-kubelet-dir\") pod \"installer-9-crc\" (UID: \"594902df-fdc6-4c21-9c22-8b4fd690d408\") " pod="openshift-kube-apiserver/installer-9-crc" Feb 16 11:10:09 crc kubenswrapper[4949]: I0216 11:10:09.159623 4949 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/594902df-fdc6-4c21-9c22-8b4fd690d408-kubelet-dir\") pod \"installer-9-crc\" (UID: \"594902df-fdc6-4c21-9c22-8b4fd690d408\") " pod="openshift-kube-apiserver/installer-9-crc" Feb 16 11:10:09 crc kubenswrapper[4949]: I0216 11:10:09.159691 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/594902df-fdc6-4c21-9c22-8b4fd690d408-var-lock\") pod \"installer-9-crc\" (UID: \"594902df-fdc6-4c21-9c22-8b4fd690d408\") " pod="openshift-kube-apiserver/installer-9-crc" Feb 16 11:10:09 crc kubenswrapper[4949]: I0216 11:10:09.159746 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/594902df-fdc6-4c21-9c22-8b4fd690d408-var-lock\") pod \"installer-9-crc\" (UID: \"594902df-fdc6-4c21-9c22-8b4fd690d408\") " pod="openshift-kube-apiserver/installer-9-crc" Feb 16 11:10:09 crc kubenswrapper[4949]: I0216 11:10:09.179135 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/594902df-fdc6-4c21-9c22-8b4fd690d408-kube-api-access\") pod \"installer-9-crc\" (UID: \"594902df-fdc6-4c21-9c22-8b4fd690d408\") " pod="openshift-kube-apiserver/installer-9-crc" Feb 16 11:10:09 crc kubenswrapper[4949]: I0216 11:10:09.297940 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Feb 16 11:10:09 crc kubenswrapper[4949]: I0216 11:10:09.541325 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Feb 16 11:10:09 crc kubenswrapper[4949]: W0216 11:10:09.549549 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod594902df_fdc6_4c21_9c22_8b4fd690d408.slice/crio-cdb8d46f98fe52c02793ce3d0f2a989b2c6754e617d7d4536a7f64f0dd247225 WatchSource:0}: Error finding container cdb8d46f98fe52c02793ce3d0f2a989b2c6754e617d7d4536a7f64f0dd247225: Status 404 returned error can't find the container with id cdb8d46f98fe52c02793ce3d0f2a989b2c6754e617d7d4536a7f64f0dd247225 Feb 16 11:10:09 crc kubenswrapper[4949]: I0216 11:10:09.720079 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"594902df-fdc6-4c21-9c22-8b4fd690d408","Type":"ContainerStarted","Data":"cdb8d46f98fe52c02793ce3d0f2a989b2c6754e617d7d4536a7f64f0dd247225"} Feb 16 11:10:10 crc kubenswrapper[4949]: I0216 11:10:10.731465 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w4598" event={"ID":"73a35f90-b2ec-4518-b927-844e164a8531","Type":"ContainerStarted","Data":"2601e166a6f735b5b2389c88eaa87a832c3bdb4bc6b262645c3b4569f1058d35"} Feb 16 11:10:10 crc kubenswrapper[4949]: I0216 11:10:10.734669 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"594902df-fdc6-4c21-9c22-8b4fd690d408","Type":"ContainerStarted","Data":"5e2152d0c3f27a0c146929394437a4b06579b4e491dae116c0d0edf0efcd60b9"} Feb 16 11:10:10 crc kubenswrapper[4949]: I0216 11:10:10.776147 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=2.776115538 podStartE2EDuration="2.776115538s" podCreationTimestamp="2026-02-16 11:10:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 
+0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:10:10.770716797 +0000 UTC m=+200.400050962" watchObservedRunningTime="2026-02-16 11:10:10.776115538 +0000 UTC m=+200.405449703" Feb 16 11:10:11 crc kubenswrapper[4949]: I0216 11:10:11.641807 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-kwq47" Feb 16 11:10:11 crc kubenswrapper[4949]: I0216 11:10:11.748885 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fcdq5" event={"ID":"37cad064-c760-43e0-8a5c-fb66fc774246","Type":"ContainerStarted","Data":"a9445a9fec4778807c14bd524b80b1acb4aaf9881b32ee06deaa159769f1606a"} Feb 16 11:10:11 crc kubenswrapper[4949]: I0216 11:10:11.751208 4949 generic.go:334] "Generic (PLEG): container finished" podID="73a35f90-b2ec-4518-b927-844e164a8531" containerID="2601e166a6f735b5b2389c88eaa87a832c3bdb4bc6b262645c3b4569f1058d35" exitCode=0 Feb 16 11:10:11 crc kubenswrapper[4949]: I0216 11:10:11.751303 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w4598" event={"ID":"73a35f90-b2ec-4518-b927-844e164a8531","Type":"ContainerDied","Data":"2601e166a6f735b5b2389c88eaa87a832c3bdb4bc6b262645c3b4569f1058d35"} Feb 16 11:10:11 crc kubenswrapper[4949]: I0216 11:10:11.763241 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x9l5x" event={"ID":"5108144a-c7ec-4cd4-b792-eb6a943dce19","Type":"ContainerStarted","Data":"4506cb52e48e4a4e99923a8997412b89334d7d348c92643de687c4ca128fec60"} Feb 16 11:10:11 crc kubenswrapper[4949]: I0216 11:10:11.765972 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rzk2z" event={"ID":"7e4a6cf5-3cdc-45de-964d-cb39392b09a3","Type":"ContainerStarted","Data":"713c73e9ae1c0397ee0001441f8c155b48da5394dec962cd5397c20d351a6dc8"} Feb 16 11:10:12 crc kubenswrapper[4949]: I0216 11:10:12.775486 4949 generic.go:334] "Generic (PLEG): container finished" podID="5108144a-c7ec-4cd4-b792-eb6a943dce19" containerID="4506cb52e48e4a4e99923a8997412b89334d7d348c92643de687c4ca128fec60" exitCode=0 Feb 16 11:10:12 crc kubenswrapper[4949]: I0216 11:10:12.775554 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x9l5x" event={"ID":"5108144a-c7ec-4cd4-b792-eb6a943dce19","Type":"ContainerDied","Data":"4506cb52e48e4a4e99923a8997412b89334d7d348c92643de687c4ca128fec60"} Feb 16 11:10:13 crc kubenswrapper[4949]: I0216 11:10:13.572957 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-2jvc5" Feb 16 11:10:13 crc kubenswrapper[4949]: I0216 11:10:13.625402 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-2jvc5" Feb 16 11:10:13 crc kubenswrapper[4949]: I0216 11:10:13.788452 4949 generic.go:334] "Generic (PLEG): container finished" podID="37cad064-c760-43e0-8a5c-fb66fc774246" containerID="a9445a9fec4778807c14bd524b80b1acb4aaf9881b32ee06deaa159769f1606a" exitCode=0 Feb 16 11:10:13 crc kubenswrapper[4949]: I0216 11:10:13.788548 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fcdq5" event={"ID":"37cad064-c760-43e0-8a5c-fb66fc774246","Type":"ContainerDied","Data":"a9445a9fec4778807c14bd524b80b1acb4aaf9881b32ee06deaa159769f1606a"} Feb 16 11:10:13 crc kubenswrapper[4949]: I0216 11:10:13.792414 
4949 generic.go:334] "Generic (PLEG): container finished" podID="7e4a6cf5-3cdc-45de-964d-cb39392b09a3" containerID="713c73e9ae1c0397ee0001441f8c155b48da5394dec962cd5397c20d351a6dc8" exitCode=0 Feb 16 11:10:13 crc kubenswrapper[4949]: I0216 11:10:13.793293 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rzk2z" event={"ID":"7e4a6cf5-3cdc-45de-964d-cb39392b09a3","Type":"ContainerDied","Data":"713c73e9ae1c0397ee0001441f8c155b48da5394dec962cd5397c20d351a6dc8"} Feb 16 11:10:14 crc kubenswrapper[4949]: I0216 11:10:14.801721 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c2gtt" event={"ID":"19e05ba4-d60e-479d-ae62-6853917d7537","Type":"ContainerStarted","Data":"3bcc754528f93b5356acf57932d57f7e0c795695386d13148c9fe8e5198fc3ae"} Feb 16 11:10:15 crc kubenswrapper[4949]: I0216 11:10:15.673987 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-7b974587b6-bqspv"] Feb 16 11:10:15 crc kubenswrapper[4949]: I0216 11:10:15.674370 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-7b974587b6-bqspv" podUID="7b1cb921-0b7d-496e-98a8-282de0057521" containerName="controller-manager" containerID="cri-o://ad6dee9a812af972bc73824beec83a0014c86f3337fdb7c54eb5117f5813ecb7" gracePeriod=30 Feb 16 11:10:15 crc kubenswrapper[4949]: I0216 11:10:15.693163 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-68c644598f-thdf2"] Feb 16 11:10:15 crc kubenswrapper[4949]: I0216 11:10:15.693556 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-68c644598f-thdf2" podUID="d0cc1aef-1574-4522-91f0-f0aa89b7e240" containerName="route-controller-manager" containerID="cri-o://e81973aab48a5469c66136db88bc3bfaa53354a8b76bfc368e65e933e3d0d718" gracePeriod=30 Feb 16 11:10:15 crc kubenswrapper[4949]: I0216 11:10:15.813396 4949 generic.go:334] "Generic (PLEG): container finished" podID="19e05ba4-d60e-479d-ae62-6853917d7537" containerID="3bcc754528f93b5356acf57932d57f7e0c795695386d13148c9fe8e5198fc3ae" exitCode=0 Feb 16 11:10:15 crc kubenswrapper[4949]: I0216 11:10:15.813469 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c2gtt" event={"ID":"19e05ba4-d60e-479d-ae62-6853917d7537","Type":"ContainerDied","Data":"3bcc754528f93b5356acf57932d57f7e0c795695386d13148c9fe8e5198fc3ae"} Feb 16 11:10:16 crc kubenswrapper[4949]: I0216 11:10:16.823082 4949 generic.go:334] "Generic (PLEG): container finished" podID="d0cc1aef-1574-4522-91f0-f0aa89b7e240" containerID="e81973aab48a5469c66136db88bc3bfaa53354a8b76bfc368e65e933e3d0d718" exitCode=0 Feb 16 11:10:16 crc kubenswrapper[4949]: I0216 11:10:16.823238 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-68c644598f-thdf2" event={"ID":"d0cc1aef-1574-4522-91f0-f0aa89b7e240","Type":"ContainerDied","Data":"e81973aab48a5469c66136db88bc3bfaa53354a8b76bfc368e65e933e3d0d718"} Feb 16 11:10:16 crc kubenswrapper[4949]: I0216 11:10:16.825877 4949 generic.go:334] "Generic (PLEG): container finished" podID="7b1cb921-0b7d-496e-98a8-282de0057521" containerID="ad6dee9a812af972bc73824beec83a0014c86f3337fdb7c54eb5117f5813ecb7" exitCode=0 Feb 16 11:10:16 crc kubenswrapper[4949]: I0216 11:10:16.825946 4949 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7b974587b6-bqspv" event={"ID":"7b1cb921-0b7d-496e-98a8-282de0057521","Type":"ContainerDied","Data":"ad6dee9a812af972bc73824beec83a0014c86f3337fdb7c54eb5117f5813ecb7"} Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.377866 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7b974587b6-bqspv" Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.406851 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7b1cb921-0b7d-496e-98a8-282de0057521-client-ca\") pod \"7b1cb921-0b7d-496e-98a8-282de0057521\" (UID: \"7b1cb921-0b7d-496e-98a8-282de0057521\") " Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.406976 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wc2mt\" (UniqueName: \"kubernetes.io/projected/7b1cb921-0b7d-496e-98a8-282de0057521-kube-api-access-wc2mt\") pod \"7b1cb921-0b7d-496e-98a8-282de0057521\" (UID: \"7b1cb921-0b7d-496e-98a8-282de0057521\") " Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.407042 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7b1cb921-0b7d-496e-98a8-282de0057521-proxy-ca-bundles\") pod \"7b1cb921-0b7d-496e-98a8-282de0057521\" (UID: \"7b1cb921-0b7d-496e-98a8-282de0057521\") " Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.407133 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7b1cb921-0b7d-496e-98a8-282de0057521-serving-cert\") pod \"7b1cb921-0b7d-496e-98a8-282de0057521\" (UID: \"7b1cb921-0b7d-496e-98a8-282de0057521\") " Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.407214 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7b1cb921-0b7d-496e-98a8-282de0057521-config\") pod \"7b1cb921-0b7d-496e-98a8-282de0057521\" (UID: \"7b1cb921-0b7d-496e-98a8-282de0057521\") " Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.408214 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7b1cb921-0b7d-496e-98a8-282de0057521-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7b1cb921-0b7d-496e-98a8-282de0057521" (UID: "7b1cb921-0b7d-496e-98a8-282de0057521"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.408292 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7b1cb921-0b7d-496e-98a8-282de0057521-config" (OuterVolumeSpecName: "config") pod "7b1cb921-0b7d-496e-98a8-282de0057521" (UID: "7b1cb921-0b7d-496e-98a8-282de0057521"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.409523 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7b1cb921-0b7d-496e-98a8-282de0057521-client-ca" (OuterVolumeSpecName: "client-ca") pod "7b1cb921-0b7d-496e-98a8-282de0057521" (UID: "7b1cb921-0b7d-496e-98a8-282de0057521"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.412511 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-fb497bdbb-l9z7g"] Feb 16 11:10:17 crc kubenswrapper[4949]: E0216 11:10:17.412882 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b1cb921-0b7d-496e-98a8-282de0057521" containerName="controller-manager" Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.412905 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b1cb921-0b7d-496e-98a8-282de0057521" containerName="controller-manager" Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.413148 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b1cb921-0b7d-496e-98a8-282de0057521" containerName="controller-manager" Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.413737 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-fb497bdbb-l9z7g" Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.417506 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b1cb921-0b7d-496e-98a8-282de0057521-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7b1cb921-0b7d-496e-98a8-282de0057521" (UID: "7b1cb921-0b7d-496e-98a8-282de0057521"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.417619 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7b1cb921-0b7d-496e-98a8-282de0057521-kube-api-access-wc2mt" (OuterVolumeSpecName: "kube-api-access-wc2mt") pod "7b1cb921-0b7d-496e-98a8-282de0057521" (UID: "7b1cb921-0b7d-496e-98a8-282de0057521"). InnerVolumeSpecName "kube-api-access-wc2mt". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.430950 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-fb497bdbb-l9z7g"] Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.496008 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-68c644598f-thdf2" Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.509651 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/15506987-5e2a-48f1-b631-b602d36397da-proxy-ca-bundles\") pod \"controller-manager-fb497bdbb-l9z7g\" (UID: \"15506987-5e2a-48f1-b631-b602d36397da\") " pod="openshift-controller-manager/controller-manager-fb497bdbb-l9z7g" Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.509888 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-47n5d\" (UniqueName: \"kubernetes.io/projected/15506987-5e2a-48f1-b631-b602d36397da-kube-api-access-47n5d\") pod \"controller-manager-fb497bdbb-l9z7g\" (UID: \"15506987-5e2a-48f1-b631-b602d36397da\") " pod="openshift-controller-manager/controller-manager-fb497bdbb-l9z7g" Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.510050 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/15506987-5e2a-48f1-b631-b602d36397da-config\") pod \"controller-manager-fb497bdbb-l9z7g\" (UID: \"15506987-5e2a-48f1-b631-b602d36397da\") " pod="openshift-controller-manager/controller-manager-fb497bdbb-l9z7g" Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.510093 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/15506987-5e2a-48f1-b631-b602d36397da-client-ca\") pod \"controller-manager-fb497bdbb-l9z7g\" (UID: \"15506987-5e2a-48f1-b631-b602d36397da\") " pod="openshift-controller-manager/controller-manager-fb497bdbb-l9z7g" Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.510112 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/15506987-5e2a-48f1-b631-b602d36397da-serving-cert\") pod \"controller-manager-fb497bdbb-l9z7g\" (UID: \"15506987-5e2a-48f1-b631-b602d36397da\") " pod="openshift-controller-manager/controller-manager-fb497bdbb-l9z7g" Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.510502 4949 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7b1cb921-0b7d-496e-98a8-282de0057521-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.510552 4949 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7b1cb921-0b7d-496e-98a8-282de0057521-client-ca\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.510568 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wc2mt\" (UniqueName: \"kubernetes.io/projected/7b1cb921-0b7d-496e-98a8-282de0057521-kube-api-access-wc2mt\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.510585 4949 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7b1cb921-0b7d-496e-98a8-282de0057521-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.510600 4949 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/7b1cb921-0b7d-496e-98a8-282de0057521-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.611135 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d0cc1aef-1574-4522-91f0-f0aa89b7e240-config\") pod \"d0cc1aef-1574-4522-91f0-f0aa89b7e240\" (UID: \"d0cc1aef-1574-4522-91f0-f0aa89b7e240\") " Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.611290 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d0cc1aef-1574-4522-91f0-f0aa89b7e240-serving-cert\") pod \"d0cc1aef-1574-4522-91f0-f0aa89b7e240\" (UID: \"d0cc1aef-1574-4522-91f0-f0aa89b7e240\") " Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.611366 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jr77v\" (UniqueName: \"kubernetes.io/projected/d0cc1aef-1574-4522-91f0-f0aa89b7e240-kube-api-access-jr77v\") pod \"d0cc1aef-1574-4522-91f0-f0aa89b7e240\" (UID: \"d0cc1aef-1574-4522-91f0-f0aa89b7e240\") " Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.611414 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d0cc1aef-1574-4522-91f0-f0aa89b7e240-client-ca\") pod \"d0cc1aef-1574-4522-91f0-f0aa89b7e240\" (UID: \"d0cc1aef-1574-4522-91f0-f0aa89b7e240\") " Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.611702 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/15506987-5e2a-48f1-b631-b602d36397da-config\") pod \"controller-manager-fb497bdbb-l9z7g\" (UID: \"15506987-5e2a-48f1-b631-b602d36397da\") " pod="openshift-controller-manager/controller-manager-fb497bdbb-l9z7g" Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.611741 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/15506987-5e2a-48f1-b631-b602d36397da-client-ca\") pod \"controller-manager-fb497bdbb-l9z7g\" (UID: \"15506987-5e2a-48f1-b631-b602d36397da\") " pod="openshift-controller-manager/controller-manager-fb497bdbb-l9z7g" Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.611767 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/15506987-5e2a-48f1-b631-b602d36397da-serving-cert\") pod \"controller-manager-fb497bdbb-l9z7g\" (UID: \"15506987-5e2a-48f1-b631-b602d36397da\") " pod="openshift-controller-manager/controller-manager-fb497bdbb-l9z7g" Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.611821 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/15506987-5e2a-48f1-b631-b602d36397da-proxy-ca-bundles\") pod \"controller-manager-fb497bdbb-l9z7g\" (UID: \"15506987-5e2a-48f1-b631-b602d36397da\") " pod="openshift-controller-manager/controller-manager-fb497bdbb-l9z7g" Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.611881 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-47n5d\" (UniqueName: \"kubernetes.io/projected/15506987-5e2a-48f1-b631-b602d36397da-kube-api-access-47n5d\") pod \"controller-manager-fb497bdbb-l9z7g\" (UID: \"15506987-5e2a-48f1-b631-b602d36397da\") " 
pod="openshift-controller-manager/controller-manager-fb497bdbb-l9z7g" Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.612439 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d0cc1aef-1574-4522-91f0-f0aa89b7e240-config" (OuterVolumeSpecName: "config") pod "d0cc1aef-1574-4522-91f0-f0aa89b7e240" (UID: "d0cc1aef-1574-4522-91f0-f0aa89b7e240"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.612744 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d0cc1aef-1574-4522-91f0-f0aa89b7e240-client-ca" (OuterVolumeSpecName: "client-ca") pod "d0cc1aef-1574-4522-91f0-f0aa89b7e240" (UID: "d0cc1aef-1574-4522-91f0-f0aa89b7e240"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.613452 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/15506987-5e2a-48f1-b631-b602d36397da-client-ca\") pod \"controller-manager-fb497bdbb-l9z7g\" (UID: \"15506987-5e2a-48f1-b631-b602d36397da\") " pod="openshift-controller-manager/controller-manager-fb497bdbb-l9z7g" Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.613507 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/15506987-5e2a-48f1-b631-b602d36397da-proxy-ca-bundles\") pod \"controller-manager-fb497bdbb-l9z7g\" (UID: \"15506987-5e2a-48f1-b631-b602d36397da\") " pod="openshift-controller-manager/controller-manager-fb497bdbb-l9z7g" Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.613733 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/15506987-5e2a-48f1-b631-b602d36397da-config\") pod \"controller-manager-fb497bdbb-l9z7g\" (UID: \"15506987-5e2a-48f1-b631-b602d36397da\") " pod="openshift-controller-manager/controller-manager-fb497bdbb-l9z7g" Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.616007 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d0cc1aef-1574-4522-91f0-f0aa89b7e240-kube-api-access-jr77v" (OuterVolumeSpecName: "kube-api-access-jr77v") pod "d0cc1aef-1574-4522-91f0-f0aa89b7e240" (UID: "d0cc1aef-1574-4522-91f0-f0aa89b7e240"). InnerVolumeSpecName "kube-api-access-jr77v". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.616870 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0cc1aef-1574-4522-91f0-f0aa89b7e240-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "d0cc1aef-1574-4522-91f0-f0aa89b7e240" (UID: "d0cc1aef-1574-4522-91f0-f0aa89b7e240"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.626299 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/15506987-5e2a-48f1-b631-b602d36397da-serving-cert\") pod \"controller-manager-fb497bdbb-l9z7g\" (UID: \"15506987-5e2a-48f1-b631-b602d36397da\") " pod="openshift-controller-manager/controller-manager-fb497bdbb-l9z7g" Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.631259 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-47n5d\" (UniqueName: \"kubernetes.io/projected/15506987-5e2a-48f1-b631-b602d36397da-kube-api-access-47n5d\") pod \"controller-manager-fb497bdbb-l9z7g\" (UID: \"15506987-5e2a-48f1-b631-b602d36397da\") " pod="openshift-controller-manager/controller-manager-fb497bdbb-l9z7g" Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.713867 4949 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d0cc1aef-1574-4522-91f0-f0aa89b7e240-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.713921 4949 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d0cc1aef-1574-4522-91f0-f0aa89b7e240-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.713939 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jr77v\" (UniqueName: \"kubernetes.io/projected/d0cc1aef-1574-4522-91f0-f0aa89b7e240-kube-api-access-jr77v\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.713952 4949 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d0cc1aef-1574-4522-91f0-f0aa89b7e240-client-ca\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.792509 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-fb497bdbb-l9z7g" Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.834235 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7b974587b6-bqspv" Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.834234 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7b974587b6-bqspv" event={"ID":"7b1cb921-0b7d-496e-98a8-282de0057521","Type":"ContainerDied","Data":"9e4f7e7af9db21749e14e766b28f4833db5cfa7656c1db2102a3e735756cfcd2"} Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.834393 4949 scope.go:117] "RemoveContainer" containerID="ad6dee9a812af972bc73824beec83a0014c86f3337fdb7c54eb5117f5813ecb7" Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.837521 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-68c644598f-thdf2" event={"ID":"d0cc1aef-1574-4522-91f0-f0aa89b7e240","Type":"ContainerDied","Data":"09f6fa5c83c199b27753a1fe8f4c74d530571faf86e7fef304db977caaa62aa0"} Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.837794 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-68c644598f-thdf2" Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.876887 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-68c644598f-thdf2"] Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.883507 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-68c644598f-thdf2"] Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.892959 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-7b974587b6-bqspv"] Feb 16 11:10:17 crc kubenswrapper[4949]: I0216 11:10:17.896578 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-7b974587b6-bqspv"] Feb 16 11:10:18 crc kubenswrapper[4949]: I0216 11:10:18.428064 4949 scope.go:117] "RemoveContainer" containerID="e81973aab48a5469c66136db88bc3bfaa53354a8b76bfc368e65e933e3d0d718" Feb 16 11:10:19 crc kubenswrapper[4949]: I0216 11:10:19.245109 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7b1cb921-0b7d-496e-98a8-282de0057521" path="/var/lib/kubelet/pods/7b1cb921-0b7d-496e-98a8-282de0057521/volumes" Feb 16 11:10:19 crc kubenswrapper[4949]: I0216 11:10:19.246642 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d0cc1aef-1574-4522-91f0-f0aa89b7e240" path="/var/lib/kubelet/pods/d0cc1aef-1574-4522-91f0-f0aa89b7e240/volumes" Feb 16 11:10:20 crc kubenswrapper[4949]: I0216 11:10:20.244132 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-865bc47cdb-8gl5d"] Feb 16 11:10:20 crc kubenswrapper[4949]: E0216 11:10:20.244628 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0cc1aef-1574-4522-91f0-f0aa89b7e240" containerName="route-controller-manager" Feb 16 11:10:20 crc kubenswrapper[4949]: I0216 11:10:20.244650 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0cc1aef-1574-4522-91f0-f0aa89b7e240" containerName="route-controller-manager" Feb 16 11:10:20 crc kubenswrapper[4949]: I0216 11:10:20.244837 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0cc1aef-1574-4522-91f0-f0aa89b7e240" containerName="route-controller-manager" Feb 16 11:10:20 crc kubenswrapper[4949]: I0216 11:10:20.245642 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-865bc47cdb-8gl5d" Feb 16 11:10:20 crc kubenswrapper[4949]: I0216 11:10:20.248423 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Feb 16 11:10:20 crc kubenswrapper[4949]: I0216 11:10:20.248932 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Feb 16 11:10:20 crc kubenswrapper[4949]: I0216 11:10:20.248997 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Feb 16 11:10:20 crc kubenswrapper[4949]: I0216 11:10:20.249501 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Feb 16 11:10:20 crc kubenswrapper[4949]: I0216 11:10:20.249868 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Feb 16 11:10:20 crc kubenswrapper[4949]: I0216 11:10:20.250006 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Feb 16 11:10:20 crc kubenswrapper[4949]: I0216 11:10:20.258363 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-865bc47cdb-8gl5d"] Feb 16 11:10:20 crc kubenswrapper[4949]: I0216 11:10:20.356965 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dcl99\" (UniqueName: \"kubernetes.io/projected/0be02be5-bffe-44a9-8209-2eca5cba3e8a-kube-api-access-dcl99\") pod \"route-controller-manager-865bc47cdb-8gl5d\" (UID: \"0be02be5-bffe-44a9-8209-2eca5cba3e8a\") " pod="openshift-route-controller-manager/route-controller-manager-865bc47cdb-8gl5d" Feb 16 11:10:20 crc kubenswrapper[4949]: I0216 11:10:20.357060 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0be02be5-bffe-44a9-8209-2eca5cba3e8a-config\") pod \"route-controller-manager-865bc47cdb-8gl5d\" (UID: \"0be02be5-bffe-44a9-8209-2eca5cba3e8a\") " pod="openshift-route-controller-manager/route-controller-manager-865bc47cdb-8gl5d" Feb 16 11:10:20 crc kubenswrapper[4949]: I0216 11:10:20.357147 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0be02be5-bffe-44a9-8209-2eca5cba3e8a-serving-cert\") pod \"route-controller-manager-865bc47cdb-8gl5d\" (UID: \"0be02be5-bffe-44a9-8209-2eca5cba3e8a\") " pod="openshift-route-controller-manager/route-controller-manager-865bc47cdb-8gl5d" Feb 16 11:10:20 crc kubenswrapper[4949]: I0216 11:10:20.357244 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0be02be5-bffe-44a9-8209-2eca5cba3e8a-client-ca\") pod \"route-controller-manager-865bc47cdb-8gl5d\" (UID: \"0be02be5-bffe-44a9-8209-2eca5cba3e8a\") " pod="openshift-route-controller-manager/route-controller-manager-865bc47cdb-8gl5d" Feb 16 11:10:20 crc kubenswrapper[4949]: I0216 11:10:20.459189 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0be02be5-bffe-44a9-8209-2eca5cba3e8a-serving-cert\") pod 
\"route-controller-manager-865bc47cdb-8gl5d\" (UID: \"0be02be5-bffe-44a9-8209-2eca5cba3e8a\") " pod="openshift-route-controller-manager/route-controller-manager-865bc47cdb-8gl5d" Feb 16 11:10:20 crc kubenswrapper[4949]: I0216 11:10:20.459287 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0be02be5-bffe-44a9-8209-2eca5cba3e8a-client-ca\") pod \"route-controller-manager-865bc47cdb-8gl5d\" (UID: \"0be02be5-bffe-44a9-8209-2eca5cba3e8a\") " pod="openshift-route-controller-manager/route-controller-manager-865bc47cdb-8gl5d" Feb 16 11:10:20 crc kubenswrapper[4949]: I0216 11:10:20.459405 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dcl99\" (UniqueName: \"kubernetes.io/projected/0be02be5-bffe-44a9-8209-2eca5cba3e8a-kube-api-access-dcl99\") pod \"route-controller-manager-865bc47cdb-8gl5d\" (UID: \"0be02be5-bffe-44a9-8209-2eca5cba3e8a\") " pod="openshift-route-controller-manager/route-controller-manager-865bc47cdb-8gl5d" Feb 16 11:10:20 crc kubenswrapper[4949]: I0216 11:10:20.459488 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0be02be5-bffe-44a9-8209-2eca5cba3e8a-config\") pod \"route-controller-manager-865bc47cdb-8gl5d\" (UID: \"0be02be5-bffe-44a9-8209-2eca5cba3e8a\") " pod="openshift-route-controller-manager/route-controller-manager-865bc47cdb-8gl5d" Feb 16 11:10:20 crc kubenswrapper[4949]: I0216 11:10:20.461546 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0be02be5-bffe-44a9-8209-2eca5cba3e8a-client-ca\") pod \"route-controller-manager-865bc47cdb-8gl5d\" (UID: \"0be02be5-bffe-44a9-8209-2eca5cba3e8a\") " pod="openshift-route-controller-manager/route-controller-manager-865bc47cdb-8gl5d" Feb 16 11:10:20 crc kubenswrapper[4949]: I0216 11:10:20.461815 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0be02be5-bffe-44a9-8209-2eca5cba3e8a-config\") pod \"route-controller-manager-865bc47cdb-8gl5d\" (UID: \"0be02be5-bffe-44a9-8209-2eca5cba3e8a\") " pod="openshift-route-controller-manager/route-controller-manager-865bc47cdb-8gl5d" Feb 16 11:10:20 crc kubenswrapper[4949]: I0216 11:10:20.470625 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0be02be5-bffe-44a9-8209-2eca5cba3e8a-serving-cert\") pod \"route-controller-manager-865bc47cdb-8gl5d\" (UID: \"0be02be5-bffe-44a9-8209-2eca5cba3e8a\") " pod="openshift-route-controller-manager/route-controller-manager-865bc47cdb-8gl5d" Feb 16 11:10:20 crc kubenswrapper[4949]: I0216 11:10:20.478877 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dcl99\" (UniqueName: \"kubernetes.io/projected/0be02be5-bffe-44a9-8209-2eca5cba3e8a-kube-api-access-dcl99\") pod \"route-controller-manager-865bc47cdb-8gl5d\" (UID: \"0be02be5-bffe-44a9-8209-2eca5cba3e8a\") " pod="openshift-route-controller-manager/route-controller-manager-865bc47cdb-8gl5d" Feb 16 11:10:20 crc kubenswrapper[4949]: I0216 11:10:20.568359 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-865bc47cdb-8gl5d" Feb 16 11:10:24 crc kubenswrapper[4949]: I0216 11:10:24.603963 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-fb497bdbb-l9z7g"] Feb 16 11:10:24 crc kubenswrapper[4949]: I0216 11:10:24.887114 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-fb497bdbb-l9z7g" event={"ID":"15506987-5e2a-48f1-b631-b602d36397da","Type":"ContainerStarted","Data":"1571689ae33e4ecfebccd9c0ae9ebeb71bedae250d097c09cf00d2ea0ba42b0e"} Feb 16 11:10:25 crc kubenswrapper[4949]: I0216 11:10:25.145108 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-865bc47cdb-8gl5d"] Feb 16 11:10:25 crc kubenswrapper[4949]: W0216 11:10:25.157867 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0be02be5_bffe_44a9_8209_2eca5cba3e8a.slice/crio-02cbeea1c123942e741bc9b846440134dc0da37d34e983c04011e3b38f623564 WatchSource:0}: Error finding container 02cbeea1c123942e741bc9b846440134dc0da37d34e983c04011e3b38f623564: Status 404 returned error can't find the container with id 02cbeea1c123942e741bc9b846440134dc0da37d34e983c04011e3b38f623564 Feb 16 11:10:25 crc kubenswrapper[4949]: I0216 11:10:25.915106 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wnqwb" event={"ID":"d8668558-9b23-4195-9816-7f9034a699e8","Type":"ContainerStarted","Data":"a72c54aa85263bcb0aee2a04fc44c88c25230c61bce2583728e88f4881e6dd77"} Feb 16 11:10:25 crc kubenswrapper[4949]: I0216 11:10:25.917862 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rzk2z" event={"ID":"7e4a6cf5-3cdc-45de-964d-cb39392b09a3","Type":"ContainerStarted","Data":"0d3f5c7a6154ed05cbcbf7ed777f330028a143a6b23a5f89b40b704192f379e1"} Feb 16 11:10:25 crc kubenswrapper[4949]: I0216 11:10:25.925933 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w4598" event={"ID":"73a35f90-b2ec-4518-b927-844e164a8531","Type":"ContainerStarted","Data":"aaebc216754e07cd5639d6445177d38893a419204e2b54619ef24ac626951f77"} Feb 16 11:10:25 crc kubenswrapper[4949]: I0216 11:10:25.936861 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x9l5x" event={"ID":"5108144a-c7ec-4cd4-b792-eb6a943dce19","Type":"ContainerStarted","Data":"5055e828632ecd0658be3e79c03e350bf8212b86498e094922e4db729ad5ab56"} Feb 16 11:10:25 crc kubenswrapper[4949]: I0216 11:10:25.938535 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-865bc47cdb-8gl5d" event={"ID":"0be02be5-bffe-44a9-8209-2eca5cba3e8a","Type":"ContainerStarted","Data":"908214e164d5a66c733ded1317f54021fe06b8e99fcae559dc4fd6850c915c91"} Feb 16 11:10:25 crc kubenswrapper[4949]: I0216 11:10:25.938559 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-865bc47cdb-8gl5d" event={"ID":"0be02be5-bffe-44a9-8209-2eca5cba3e8a","Type":"ContainerStarted","Data":"02cbeea1c123942e741bc9b846440134dc0da37d34e983c04011e3b38f623564"} Feb 16 11:10:25 crc kubenswrapper[4949]: I0216 11:10:25.938868 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-route-controller-manager/route-controller-manager-865bc47cdb-8gl5d" Feb 16 11:10:25 crc kubenswrapper[4949]: I0216 11:10:25.940946 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-fb497bdbb-l9z7g" event={"ID":"15506987-5e2a-48f1-b631-b602d36397da","Type":"ContainerStarted","Data":"3ed6f76cbdfe7b6a7e947510f05508a720f90ea28f779426af0879b34b2d5f82"} Feb 16 11:10:25 crc kubenswrapper[4949]: I0216 11:10:25.942131 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-fb497bdbb-l9z7g" Feb 16 11:10:25 crc kubenswrapper[4949]: I0216 11:10:25.947282 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-fb497bdbb-l9z7g" Feb 16 11:10:25 crc kubenswrapper[4949]: I0216 11:10:25.952513 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c2gtt" event={"ID":"19e05ba4-d60e-479d-ae62-6853917d7537","Type":"ContainerStarted","Data":"96d4428a669d625cd4c8d0f0b96bb5d6ad767855fd175562aa80a701be2bdf96"} Feb 16 11:10:25 crc kubenswrapper[4949]: I0216 11:10:25.955421 4949 generic.go:334] "Generic (PLEG): container finished" podID="67b23a8b-bf6c-4d65-9359-7ba9ffe71216" containerID="05368bb10d7d7deb2a4fe5170709b0f721fc7b58fe7f885e5cf252778d0a5f81" exitCode=0 Feb 16 11:10:25 crc kubenswrapper[4949]: I0216 11:10:25.955525 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ccdhx" event={"ID":"67b23a8b-bf6c-4d65-9359-7ba9ffe71216","Type":"ContainerDied","Data":"05368bb10d7d7deb2a4fe5170709b0f721fc7b58fe7f885e5cf252778d0a5f81"} Feb 16 11:10:25 crc kubenswrapper[4949]: I0216 11:10:25.958684 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fcdq5" event={"ID":"37cad064-c760-43e0-8a5c-fb66fc774246","Type":"ContainerStarted","Data":"9c0bacc6281fd8a737b14024b78bfa4ca1732dc029e44a38f2cac8fa563bb387"} Feb 16 11:10:25 crc kubenswrapper[4949]: I0216 11:10:25.982881 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-x9l5x" podStartSLOduration=4.343214339 podStartE2EDuration="1m5.982838375s" podCreationTimestamp="2026-02-16 11:09:20 +0000 UTC" firstStartedPulling="2026-02-16 11:09:22.79739231 +0000 UTC m=+152.426726475" lastFinishedPulling="2026-02-16 11:10:24.437016346 +0000 UTC m=+214.066350511" observedRunningTime="2026-02-16 11:10:25.977497505 +0000 UTC m=+215.606831670" watchObservedRunningTime="2026-02-16 11:10:25.982838375 +0000 UTC m=+215.612172540" Feb 16 11:10:26 crc kubenswrapper[4949]: I0216 11:10:25.999999 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-fb497bdbb-l9z7g" podStartSLOduration=10.999971466 podStartE2EDuration="10.999971466s" podCreationTimestamp="2026-02-16 11:10:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:10:25.998471441 +0000 UTC m=+215.627805606" watchObservedRunningTime="2026-02-16 11:10:25.999971466 +0000 UTC m=+215.629305631" Feb 16 11:10:26 crc kubenswrapper[4949]: I0216 11:10:26.039263 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-865bc47cdb-8gl5d" podStartSLOduration=11.039196465 
podStartE2EDuration="11.039196465s" podCreationTimestamp="2026-02-16 11:10:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:10:26.032925988 +0000 UTC m=+215.662260143" watchObservedRunningTime="2026-02-16 11:10:26.039196465 +0000 UTC m=+215.668530630" Feb 16 11:10:26 crc kubenswrapper[4949]: I0216 11:10:26.109733 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-rzk2z" podStartSLOduration=3.107522079 podStartE2EDuration="1m3.109703628s" podCreationTimestamp="2026-02-16 11:09:23 +0000 UTC" firstStartedPulling="2026-02-16 11:09:24.924392496 +0000 UTC m=+154.553726661" lastFinishedPulling="2026-02-16 11:10:24.926574045 +0000 UTC m=+214.555908210" observedRunningTime="2026-02-16 11:10:26.073740446 +0000 UTC m=+215.703074611" watchObservedRunningTime="2026-02-16 11:10:26.109703628 +0000 UTC m=+215.739037793" Feb 16 11:10:26 crc kubenswrapper[4949]: I0216 11:10:26.111075 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-w4598" podStartSLOduration=12.822700576999999 podStartE2EDuration="1m7.111068439s" podCreationTimestamp="2026-02-16 11:09:19 +0000 UTC" firstStartedPulling="2026-02-16 11:09:22.863094919 +0000 UTC m=+152.492429084" lastFinishedPulling="2026-02-16 11:10:17.151462781 +0000 UTC m=+206.780796946" observedRunningTime="2026-02-16 11:10:26.110022268 +0000 UTC m=+215.739356433" watchObservedRunningTime="2026-02-16 11:10:26.111068439 +0000 UTC m=+215.740402604" Feb 16 11:10:26 crc kubenswrapper[4949]: I0216 11:10:26.145507 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-fcdq5" podStartSLOduration=7.935430371 podStartE2EDuration="1m7.145471025s" podCreationTimestamp="2026-02-16 11:09:19 +0000 UTC" firstStartedPulling="2026-02-16 11:09:22.79611548 +0000 UTC m=+152.425449645" lastFinishedPulling="2026-02-16 11:10:22.006156134 +0000 UTC m=+211.635490299" observedRunningTime="2026-02-16 11:10:26.143857037 +0000 UTC m=+215.773191202" watchObservedRunningTime="2026-02-16 11:10:26.145471025 +0000 UTC m=+215.774805190" Feb 16 11:10:26 crc kubenswrapper[4949]: I0216 11:10:26.180636 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-c2gtt" podStartSLOduration=4.129429863 podStartE2EDuration="1m5.180614623s" podCreationTimestamp="2026-02-16 11:09:21 +0000 UTC" firstStartedPulling="2026-02-16 11:09:23.8936834 +0000 UTC m=+153.523017565" lastFinishedPulling="2026-02-16 11:10:24.94486816 +0000 UTC m=+214.574202325" observedRunningTime="2026-02-16 11:10:26.178609783 +0000 UTC m=+215.807943948" watchObservedRunningTime="2026-02-16 11:10:26.180614623 +0000 UTC m=+215.809948788" Feb 16 11:10:26 crc kubenswrapper[4949]: I0216 11:10:26.314105 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-865bc47cdb-8gl5d" Feb 16 11:10:26 crc kubenswrapper[4949]: I0216 11:10:26.968187 4949 generic.go:334] "Generic (PLEG): container finished" podID="d8668558-9b23-4195-9816-7f9034a699e8" containerID="a72c54aa85263bcb0aee2a04fc44c88c25230c61bce2583728e88f4881e6dd77" exitCode=0 Feb 16 11:10:26 crc kubenswrapper[4949]: I0216 11:10:26.968326 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wnqwb" 
event={"ID":"d8668558-9b23-4195-9816-7f9034a699e8","Type":"ContainerDied","Data":"a72c54aa85263bcb0aee2a04fc44c88c25230c61bce2583728e88f4881e6dd77"} Feb 16 11:10:27 crc kubenswrapper[4949]: I0216 11:10:27.982322 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wnqwb" event={"ID":"d8668558-9b23-4195-9816-7f9034a699e8","Type":"ContainerStarted","Data":"8ddca7a45bff667fb83f95c9bdac56e9ee7820f1030845867111899a1a10c420"} Feb 16 11:10:28 crc kubenswrapper[4949]: I0216 11:10:28.008136 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ccdhx" event={"ID":"67b23a8b-bf6c-4d65-9359-7ba9ffe71216","Type":"ContainerStarted","Data":"79df4687b09bfc583f1dd9e43f57aa8f864680d6d556b8eaa34b443b25d597bf"} Feb 16 11:10:28 crc kubenswrapper[4949]: I0216 11:10:28.015672 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-wnqwb" podStartSLOduration=3.324831426 podStartE2EDuration="1m8.015648427s" podCreationTimestamp="2026-02-16 11:09:20 +0000 UTC" firstStartedPulling="2026-02-16 11:09:22.821446274 +0000 UTC m=+152.450780449" lastFinishedPulling="2026-02-16 11:10:27.512263285 +0000 UTC m=+217.141597450" observedRunningTime="2026-02-16 11:10:28.015343688 +0000 UTC m=+217.644677863" watchObservedRunningTime="2026-02-16 11:10:28.015648427 +0000 UTC m=+217.644982592" Feb 16 11:10:28 crc kubenswrapper[4949]: I0216 11:10:28.043256 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-ccdhx" podStartSLOduration=3.621150255 podStartE2EDuration="1m6.043222618s" podCreationTimestamp="2026-02-16 11:09:22 +0000 UTC" firstStartedPulling="2026-02-16 11:09:24.918998347 +0000 UTC m=+154.548332512" lastFinishedPulling="2026-02-16 11:10:27.34107071 +0000 UTC m=+216.970404875" observedRunningTime="2026-02-16 11:10:28.041342442 +0000 UTC m=+217.670676607" watchObservedRunningTime="2026-02-16 11:10:28.043222618 +0000 UTC m=+217.672556773" Feb 16 11:10:30 crc kubenswrapper[4949]: I0216 11:10:30.111728 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-fcdq5" Feb 16 11:10:30 crc kubenswrapper[4949]: I0216 11:10:30.111793 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-fcdq5" Feb 16 11:10:30 crc kubenswrapper[4949]: I0216 11:10:30.168108 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-fcdq5" Feb 16 11:10:30 crc kubenswrapper[4949]: I0216 11:10:30.275735 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-w4598" Feb 16 11:10:30 crc kubenswrapper[4949]: I0216 11:10:30.275821 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-w4598" Feb 16 11:10:30 crc kubenswrapper[4949]: I0216 11:10:30.318831 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-w4598" Feb 16 11:10:30 crc kubenswrapper[4949]: I0216 11:10:30.562632 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-wnqwb" Feb 16 11:10:30 crc kubenswrapper[4949]: I0216 11:10:30.562718 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/community-operators-wnqwb" Feb 16 11:10:30 crc kubenswrapper[4949]: I0216 11:10:30.610806 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-wnqwb" Feb 16 11:10:30 crc kubenswrapper[4949]: I0216 11:10:30.712742 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-x9l5x" Feb 16 11:10:30 crc kubenswrapper[4949]: I0216 11:10:30.712889 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-x9l5x" Feb 16 11:10:30 crc kubenswrapper[4949]: I0216 11:10:30.765343 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-x9l5x" Feb 16 11:10:31 crc kubenswrapper[4949]: I0216 11:10:31.077357 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-fcdq5" Feb 16 11:10:31 crc kubenswrapper[4949]: I0216 11:10:31.082480 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-w4598" Feb 16 11:10:31 crc kubenswrapper[4949]: I0216 11:10:31.088708 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-x9l5x" Feb 16 11:10:31 crc kubenswrapper[4949]: I0216 11:10:31.392870 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-g4khj"] Feb 16 11:10:32 crc kubenswrapper[4949]: I0216 11:10:32.141284 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-c2gtt" Feb 16 11:10:32 crc kubenswrapper[4949]: I0216 11:10:32.141521 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-c2gtt" Feb 16 11:10:32 crc kubenswrapper[4949]: I0216 11:10:32.184848 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-c2gtt" Feb 16 11:10:32 crc kubenswrapper[4949]: I0216 11:10:32.709598 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-ccdhx" Feb 16 11:10:32 crc kubenswrapper[4949]: I0216 11:10:32.711227 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-ccdhx" Feb 16 11:10:32 crc kubenswrapper[4949]: I0216 11:10:32.759250 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-ccdhx" Feb 16 11:10:33 crc kubenswrapper[4949]: I0216 11:10:33.085154 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-ccdhx" Feb 16 11:10:33 crc kubenswrapper[4949]: I0216 11:10:33.092414 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-c2gtt" Feb 16 11:10:33 crc kubenswrapper[4949]: I0216 11:10:33.736190 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-rzk2z" Feb 16 11:10:33 crc kubenswrapper[4949]: I0216 11:10:33.736270 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-rzk2z" Feb 16 11:10:33 crc kubenswrapper[4949]: I0216 11:10:33.789637 4949 kubelet.go:2542] "SyncLoop 
(probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-rzk2z" Feb 16 11:10:34 crc kubenswrapper[4949]: I0216 11:10:34.093408 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-rzk2z" Feb 16 11:10:34 crc kubenswrapper[4949]: I0216 11:10:34.279714 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-x9l5x"] Feb 16 11:10:34 crc kubenswrapper[4949]: I0216 11:10:34.280101 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-x9l5x" podUID="5108144a-c7ec-4cd4-b792-eb6a943dce19" containerName="registry-server" containerID="cri-o://5055e828632ecd0658be3e79c03e350bf8212b86498e094922e4db729ad5ab56" gracePeriod=2 Feb 16 11:10:34 crc kubenswrapper[4949]: I0216 11:10:34.550494 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 16 11:10:34 crc kubenswrapper[4949]: I0216 11:10:34.550605 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 16 11:10:34 crc kubenswrapper[4949]: I0216 11:10:34.550691 4949 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-26lss" Feb 16 11:10:34 crc kubenswrapper[4949]: I0216 11:10:34.551665 4949 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4978ec698681f01249736b54aa5a1fba37fc94f3667b735c99bdfb97d3bd3380"} pod="openshift-machine-config-operator/machine-config-daemon-26lss" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 16 11:10:34 crc kubenswrapper[4949]: I0216 11:10:34.551742 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" containerID="cri-o://4978ec698681f01249736b54aa5a1fba37fc94f3667b735c99bdfb97d3bd3380" gracePeriod=600 Feb 16 11:10:34 crc kubenswrapper[4949]: I0216 11:10:34.831744 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-x9l5x" Feb 16 11:10:34 crc kubenswrapper[4949]: I0216 11:10:34.897132 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-45wcd\" (UniqueName: \"kubernetes.io/projected/5108144a-c7ec-4cd4-b792-eb6a943dce19-kube-api-access-45wcd\") pod \"5108144a-c7ec-4cd4-b792-eb6a943dce19\" (UID: \"5108144a-c7ec-4cd4-b792-eb6a943dce19\") " Feb 16 11:10:34 crc kubenswrapper[4949]: I0216 11:10:34.897320 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5108144a-c7ec-4cd4-b792-eb6a943dce19-utilities\") pod \"5108144a-c7ec-4cd4-b792-eb6a943dce19\" (UID: \"5108144a-c7ec-4cd4-b792-eb6a943dce19\") " Feb 16 11:10:34 crc kubenswrapper[4949]: I0216 11:10:34.897347 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5108144a-c7ec-4cd4-b792-eb6a943dce19-catalog-content\") pod \"5108144a-c7ec-4cd4-b792-eb6a943dce19\" (UID: \"5108144a-c7ec-4cd4-b792-eb6a943dce19\") " Feb 16 11:10:34 crc kubenswrapper[4949]: I0216 11:10:34.898539 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5108144a-c7ec-4cd4-b792-eb6a943dce19-utilities" (OuterVolumeSpecName: "utilities") pod "5108144a-c7ec-4cd4-b792-eb6a943dce19" (UID: "5108144a-c7ec-4cd4-b792-eb6a943dce19"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:10:34 crc kubenswrapper[4949]: I0216 11:10:34.904652 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5108144a-c7ec-4cd4-b792-eb6a943dce19-kube-api-access-45wcd" (OuterVolumeSpecName: "kube-api-access-45wcd") pod "5108144a-c7ec-4cd4-b792-eb6a943dce19" (UID: "5108144a-c7ec-4cd4-b792-eb6a943dce19"). InnerVolumeSpecName "kube-api-access-45wcd". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:10:34 crc kubenswrapper[4949]: I0216 11:10:34.949267 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5108144a-c7ec-4cd4-b792-eb6a943dce19-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5108144a-c7ec-4cd4-b792-eb6a943dce19" (UID: "5108144a-c7ec-4cd4-b792-eb6a943dce19"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:10:35 crc kubenswrapper[4949]: I0216 11:10:35.000391 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-45wcd\" (UniqueName: \"kubernetes.io/projected/5108144a-c7ec-4cd4-b792-eb6a943dce19-kube-api-access-45wcd\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:35 crc kubenswrapper[4949]: I0216 11:10:35.000451 4949 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5108144a-c7ec-4cd4-b792-eb6a943dce19-utilities\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:35 crc kubenswrapper[4949]: I0216 11:10:35.000463 4949 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5108144a-c7ec-4cd4-b792-eb6a943dce19-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:35 crc kubenswrapper[4949]: I0216 11:10:35.060335 4949 generic.go:334] "Generic (PLEG): container finished" podID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerID="4978ec698681f01249736b54aa5a1fba37fc94f3667b735c99bdfb97d3bd3380" exitCode=0 Feb 16 11:10:35 crc kubenswrapper[4949]: I0216 11:10:35.060750 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" event={"ID":"39ca5ab7-457c-4404-a3eb-f6acce74843b","Type":"ContainerDied","Data":"4978ec698681f01249736b54aa5a1fba37fc94f3667b735c99bdfb97d3bd3380"} Feb 16 11:10:35 crc kubenswrapper[4949]: I0216 11:10:35.063408 4949 generic.go:334] "Generic (PLEG): container finished" podID="5108144a-c7ec-4cd4-b792-eb6a943dce19" containerID="5055e828632ecd0658be3e79c03e350bf8212b86498e094922e4db729ad5ab56" exitCode=0 Feb 16 11:10:35 crc kubenswrapper[4949]: I0216 11:10:35.063519 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x9l5x" event={"ID":"5108144a-c7ec-4cd4-b792-eb6a943dce19","Type":"ContainerDied","Data":"5055e828632ecd0658be3e79c03e350bf8212b86498e094922e4db729ad5ab56"} Feb 16 11:10:35 crc kubenswrapper[4949]: I0216 11:10:35.063578 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x9l5x" event={"ID":"5108144a-c7ec-4cd4-b792-eb6a943dce19","Type":"ContainerDied","Data":"613011fab2f3833612d672ea0b604205fbce494f015b5727a0457e79986a956b"} Feb 16 11:10:35 crc kubenswrapper[4949]: I0216 11:10:35.063605 4949 scope.go:117] "RemoveContainer" containerID="5055e828632ecd0658be3e79c03e350bf8212b86498e094922e4db729ad5ab56" Feb 16 11:10:35 crc kubenswrapper[4949]: I0216 11:10:35.063895 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-x9l5x" Feb 16 11:10:35 crc kubenswrapper[4949]: I0216 11:10:35.084000 4949 scope.go:117] "RemoveContainer" containerID="4506cb52e48e4a4e99923a8997412b89334d7d348c92643de687c4ca128fec60" Feb 16 11:10:35 crc kubenswrapper[4949]: I0216 11:10:35.103470 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-x9l5x"] Feb 16 11:10:35 crc kubenswrapper[4949]: I0216 11:10:35.109289 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-x9l5x"] Feb 16 11:10:35 crc kubenswrapper[4949]: I0216 11:10:35.114016 4949 scope.go:117] "RemoveContainer" containerID="a9f9147dad2fba507a459e7be8a07e318b5d909428430941b48fea1d940b7091" Feb 16 11:10:35 crc kubenswrapper[4949]: I0216 11:10:35.132772 4949 scope.go:117] "RemoveContainer" containerID="5055e828632ecd0658be3e79c03e350bf8212b86498e094922e4db729ad5ab56" Feb 16 11:10:35 crc kubenswrapper[4949]: E0216 11:10:35.135417 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5055e828632ecd0658be3e79c03e350bf8212b86498e094922e4db729ad5ab56\": container with ID starting with 5055e828632ecd0658be3e79c03e350bf8212b86498e094922e4db729ad5ab56 not found: ID does not exist" containerID="5055e828632ecd0658be3e79c03e350bf8212b86498e094922e4db729ad5ab56" Feb 16 11:10:35 crc kubenswrapper[4949]: I0216 11:10:35.135508 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5055e828632ecd0658be3e79c03e350bf8212b86498e094922e4db729ad5ab56"} err="failed to get container status \"5055e828632ecd0658be3e79c03e350bf8212b86498e094922e4db729ad5ab56\": rpc error: code = NotFound desc = could not find container \"5055e828632ecd0658be3e79c03e350bf8212b86498e094922e4db729ad5ab56\": container with ID starting with 5055e828632ecd0658be3e79c03e350bf8212b86498e094922e4db729ad5ab56 not found: ID does not exist" Feb 16 11:10:35 crc kubenswrapper[4949]: I0216 11:10:35.135556 4949 scope.go:117] "RemoveContainer" containerID="4506cb52e48e4a4e99923a8997412b89334d7d348c92643de687c4ca128fec60" Feb 16 11:10:35 crc kubenswrapper[4949]: E0216 11:10:35.143540 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4506cb52e48e4a4e99923a8997412b89334d7d348c92643de687c4ca128fec60\": container with ID starting with 4506cb52e48e4a4e99923a8997412b89334d7d348c92643de687c4ca128fec60 not found: ID does not exist" containerID="4506cb52e48e4a4e99923a8997412b89334d7d348c92643de687c4ca128fec60" Feb 16 11:10:35 crc kubenswrapper[4949]: I0216 11:10:35.143607 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4506cb52e48e4a4e99923a8997412b89334d7d348c92643de687c4ca128fec60"} err="failed to get container status \"4506cb52e48e4a4e99923a8997412b89334d7d348c92643de687c4ca128fec60\": rpc error: code = NotFound desc = could not find container \"4506cb52e48e4a4e99923a8997412b89334d7d348c92643de687c4ca128fec60\": container with ID starting with 4506cb52e48e4a4e99923a8997412b89334d7d348c92643de687c4ca128fec60 not found: ID does not exist" Feb 16 11:10:35 crc kubenswrapper[4949]: I0216 11:10:35.143677 4949 scope.go:117] "RemoveContainer" containerID="a9f9147dad2fba507a459e7be8a07e318b5d909428430941b48fea1d940b7091" Feb 16 11:10:35 crc kubenswrapper[4949]: E0216 11:10:35.144128 4949 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"a9f9147dad2fba507a459e7be8a07e318b5d909428430941b48fea1d940b7091\": container with ID starting with a9f9147dad2fba507a459e7be8a07e318b5d909428430941b48fea1d940b7091 not found: ID does not exist" containerID="a9f9147dad2fba507a459e7be8a07e318b5d909428430941b48fea1d940b7091" Feb 16 11:10:35 crc kubenswrapper[4949]: I0216 11:10:35.144208 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a9f9147dad2fba507a459e7be8a07e318b5d909428430941b48fea1d940b7091"} err="failed to get container status \"a9f9147dad2fba507a459e7be8a07e318b5d909428430941b48fea1d940b7091\": rpc error: code = NotFound desc = could not find container \"a9f9147dad2fba507a459e7be8a07e318b5d909428430941b48fea1d940b7091\": container with ID starting with a9f9147dad2fba507a459e7be8a07e318b5d909428430941b48fea1d940b7091 not found: ID does not exist" Feb 16 11:10:35 crc kubenswrapper[4949]: I0216 11:10:35.246088 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5108144a-c7ec-4cd4-b792-eb6a943dce19" path="/var/lib/kubelet/pods/5108144a-c7ec-4cd4-b792-eb6a943dce19/volumes" Feb 16 11:10:35 crc kubenswrapper[4949]: I0216 11:10:35.680444 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-fb497bdbb-l9z7g"] Feb 16 11:10:35 crc kubenswrapper[4949]: I0216 11:10:35.681237 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-fb497bdbb-l9z7g" podUID="15506987-5e2a-48f1-b631-b602d36397da" containerName="controller-manager" containerID="cri-o://3ed6f76cbdfe7b6a7e947510f05508a720f90ea28f779426af0879b34b2d5f82" gracePeriod=30 Feb 16 11:10:35 crc kubenswrapper[4949]: I0216 11:10:35.777538 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-865bc47cdb-8gl5d"] Feb 16 11:10:35 crc kubenswrapper[4949]: I0216 11:10:35.777903 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-865bc47cdb-8gl5d" podUID="0be02be5-bffe-44a9-8209-2eca5cba3e8a" containerName="route-controller-manager" containerID="cri-o://908214e164d5a66c733ded1317f54021fe06b8e99fcae559dc4fd6850c915c91" gracePeriod=30 Feb 16 11:10:36 crc kubenswrapper[4949]: I0216 11:10:36.074293 4949 generic.go:334] "Generic (PLEG): container finished" podID="15506987-5e2a-48f1-b631-b602d36397da" containerID="3ed6f76cbdfe7b6a7e947510f05508a720f90ea28f779426af0879b34b2d5f82" exitCode=0 Feb 16 11:10:36 crc kubenswrapper[4949]: I0216 11:10:36.074404 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-fb497bdbb-l9z7g" event={"ID":"15506987-5e2a-48f1-b631-b602d36397da","Type":"ContainerDied","Data":"3ed6f76cbdfe7b6a7e947510f05508a720f90ea28f779426af0879b34b2d5f82"} Feb 16 11:10:36 crc kubenswrapper[4949]: I0216 11:10:36.078476 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" event={"ID":"39ca5ab7-457c-4404-a3eb-f6acce74843b","Type":"ContainerStarted","Data":"85a0f2845d673d577410365d3d922f1e1f73440200500da1d4a358fe007bfb62"} Feb 16 11:10:36 crc kubenswrapper[4949]: I0216 11:10:36.082982 4949 generic.go:334] "Generic (PLEG): container finished" podID="0be02be5-bffe-44a9-8209-2eca5cba3e8a" 
containerID="908214e164d5a66c733ded1317f54021fe06b8e99fcae559dc4fd6850c915c91" exitCode=0 Feb 16 11:10:36 crc kubenswrapper[4949]: I0216 11:10:36.083032 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-865bc47cdb-8gl5d" event={"ID":"0be02be5-bffe-44a9-8209-2eca5cba3e8a","Type":"ContainerDied","Data":"908214e164d5a66c733ded1317f54021fe06b8e99fcae559dc4fd6850c915c91"} Feb 16 11:10:36 crc kubenswrapper[4949]: I0216 11:10:36.258387 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-865bc47cdb-8gl5d" Feb 16 11:10:36 crc kubenswrapper[4949]: I0216 11:10:36.263485 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-fb497bdbb-l9z7g" Feb 16 11:10:36 crc kubenswrapper[4949]: I0216 11:10:36.321876 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0be02be5-bffe-44a9-8209-2eca5cba3e8a-client-ca\") pod \"0be02be5-bffe-44a9-8209-2eca5cba3e8a\" (UID: \"0be02be5-bffe-44a9-8209-2eca5cba3e8a\") " Feb 16 11:10:36 crc kubenswrapper[4949]: I0216 11:10:36.322033 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dcl99\" (UniqueName: \"kubernetes.io/projected/0be02be5-bffe-44a9-8209-2eca5cba3e8a-kube-api-access-dcl99\") pod \"0be02be5-bffe-44a9-8209-2eca5cba3e8a\" (UID: \"0be02be5-bffe-44a9-8209-2eca5cba3e8a\") " Feb 16 11:10:36 crc kubenswrapper[4949]: I0216 11:10:36.322074 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/15506987-5e2a-48f1-b631-b602d36397da-client-ca\") pod \"15506987-5e2a-48f1-b631-b602d36397da\" (UID: \"15506987-5e2a-48f1-b631-b602d36397da\") " Feb 16 11:10:36 crc kubenswrapper[4949]: I0216 11:10:36.322142 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-47n5d\" (UniqueName: \"kubernetes.io/projected/15506987-5e2a-48f1-b631-b602d36397da-kube-api-access-47n5d\") pod \"15506987-5e2a-48f1-b631-b602d36397da\" (UID: \"15506987-5e2a-48f1-b631-b602d36397da\") " Feb 16 11:10:36 crc kubenswrapper[4949]: I0216 11:10:36.322219 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/15506987-5e2a-48f1-b631-b602d36397da-config\") pod \"15506987-5e2a-48f1-b631-b602d36397da\" (UID: \"15506987-5e2a-48f1-b631-b602d36397da\") " Feb 16 11:10:36 crc kubenswrapper[4949]: I0216 11:10:36.322245 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0be02be5-bffe-44a9-8209-2eca5cba3e8a-config\") pod \"0be02be5-bffe-44a9-8209-2eca5cba3e8a\" (UID: \"0be02be5-bffe-44a9-8209-2eca5cba3e8a\") " Feb 16 11:10:36 crc kubenswrapper[4949]: I0216 11:10:36.322272 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/15506987-5e2a-48f1-b631-b602d36397da-serving-cert\") pod \"15506987-5e2a-48f1-b631-b602d36397da\" (UID: \"15506987-5e2a-48f1-b631-b602d36397da\") " Feb 16 11:10:36 crc kubenswrapper[4949]: I0216 11:10:36.322292 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/0be02be5-bffe-44a9-8209-2eca5cba3e8a-serving-cert\") pod \"0be02be5-bffe-44a9-8209-2eca5cba3e8a\" (UID: \"0be02be5-bffe-44a9-8209-2eca5cba3e8a\") " Feb 16 11:10:36 crc kubenswrapper[4949]: I0216 11:10:36.322402 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/15506987-5e2a-48f1-b631-b602d36397da-proxy-ca-bundles\") pod \"15506987-5e2a-48f1-b631-b602d36397da\" (UID: \"15506987-5e2a-48f1-b631-b602d36397da\") " Feb 16 11:10:36 crc kubenswrapper[4949]: I0216 11:10:36.323079 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0be02be5-bffe-44a9-8209-2eca5cba3e8a-client-ca" (OuterVolumeSpecName: "client-ca") pod "0be02be5-bffe-44a9-8209-2eca5cba3e8a" (UID: "0be02be5-bffe-44a9-8209-2eca5cba3e8a"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:10:36 crc kubenswrapper[4949]: I0216 11:10:36.323302 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/15506987-5e2a-48f1-b631-b602d36397da-client-ca" (OuterVolumeSpecName: "client-ca") pod "15506987-5e2a-48f1-b631-b602d36397da" (UID: "15506987-5e2a-48f1-b631-b602d36397da"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:10:36 crc kubenswrapper[4949]: I0216 11:10:36.323382 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0be02be5-bffe-44a9-8209-2eca5cba3e8a-config" (OuterVolumeSpecName: "config") pod "0be02be5-bffe-44a9-8209-2eca5cba3e8a" (UID: "0be02be5-bffe-44a9-8209-2eca5cba3e8a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:10:36 crc kubenswrapper[4949]: I0216 11:10:36.323435 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/15506987-5e2a-48f1-b631-b602d36397da-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "15506987-5e2a-48f1-b631-b602d36397da" (UID: "15506987-5e2a-48f1-b631-b602d36397da"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:10:36 crc kubenswrapper[4949]: I0216 11:10:36.324630 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/15506987-5e2a-48f1-b631-b602d36397da-config" (OuterVolumeSpecName: "config") pod "15506987-5e2a-48f1-b631-b602d36397da" (UID: "15506987-5e2a-48f1-b631-b602d36397da"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:10:36 crc kubenswrapper[4949]: I0216 11:10:36.325307 4949 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0be02be5-bffe-44a9-8209-2eca5cba3e8a-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:36 crc kubenswrapper[4949]: I0216 11:10:36.325338 4949 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/15506987-5e2a-48f1-b631-b602d36397da-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:36 crc kubenswrapper[4949]: I0216 11:10:36.325357 4949 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0be02be5-bffe-44a9-8209-2eca5cba3e8a-client-ca\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:36 crc kubenswrapper[4949]: I0216 11:10:36.325416 4949 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/15506987-5e2a-48f1-b631-b602d36397da-client-ca\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:36 crc kubenswrapper[4949]: I0216 11:10:36.325448 4949 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/15506987-5e2a-48f1-b631-b602d36397da-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:36 crc kubenswrapper[4949]: I0216 11:10:36.328802 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/15506987-5e2a-48f1-b631-b602d36397da-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "15506987-5e2a-48f1-b631-b602d36397da" (UID: "15506987-5e2a-48f1-b631-b602d36397da"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:10:36 crc kubenswrapper[4949]: I0216 11:10:36.328867 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/15506987-5e2a-48f1-b631-b602d36397da-kube-api-access-47n5d" (OuterVolumeSpecName: "kube-api-access-47n5d") pod "15506987-5e2a-48f1-b631-b602d36397da" (UID: "15506987-5e2a-48f1-b631-b602d36397da"). InnerVolumeSpecName "kube-api-access-47n5d". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:10:36 crc kubenswrapper[4949]: I0216 11:10:36.328905 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0be02be5-bffe-44a9-8209-2eca5cba3e8a-kube-api-access-dcl99" (OuterVolumeSpecName: "kube-api-access-dcl99") pod "0be02be5-bffe-44a9-8209-2eca5cba3e8a" (UID: "0be02be5-bffe-44a9-8209-2eca5cba3e8a"). InnerVolumeSpecName "kube-api-access-dcl99". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:10:36 crc kubenswrapper[4949]: I0216 11:10:36.328991 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0be02be5-bffe-44a9-8209-2eca5cba3e8a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0be02be5-bffe-44a9-8209-2eca5cba3e8a" (UID: "0be02be5-bffe-44a9-8209-2eca5cba3e8a"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:10:36 crc kubenswrapper[4949]: I0216 11:10:36.428649 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dcl99\" (UniqueName: \"kubernetes.io/projected/0be02be5-bffe-44a9-8209-2eca5cba3e8a-kube-api-access-dcl99\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:36 crc kubenswrapper[4949]: I0216 11:10:36.429063 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-47n5d\" (UniqueName: \"kubernetes.io/projected/15506987-5e2a-48f1-b631-b602d36397da-kube-api-access-47n5d\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:36 crc kubenswrapper[4949]: I0216 11:10:36.429084 4949 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0be02be5-bffe-44a9-8209-2eca5cba3e8a-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:36 crc kubenswrapper[4949]: I0216 11:10:36.429101 4949 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/15506987-5e2a-48f1-b631-b602d36397da-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:36 crc kubenswrapper[4949]: I0216 11:10:36.677343 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ccdhx"] Feb 16 11:10:36 crc kubenswrapper[4949]: I0216 11:10:36.677801 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-ccdhx" podUID="67b23a8b-bf6c-4d65-9359-7ba9ffe71216" containerName="registry-server" containerID="cri-o://79df4687b09bfc583f1dd9e43f57aa8f864680d6d556b8eaa34b443b25d597bf" gracePeriod=2 Feb 16 11:10:36 crc kubenswrapper[4949]: I0216 11:10:36.889470 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rzk2z"] Feb 16 11:10:36 crc kubenswrapper[4949]: I0216 11:10:36.890293 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-rzk2z" podUID="7e4a6cf5-3cdc-45de-964d-cb39392b09a3" containerName="registry-server" containerID="cri-o://0d3f5c7a6154ed05cbcbf7ed777f330028a143a6b23a5f89b40b704192f379e1" gracePeriod=2 Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.100349 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-865bc47cdb-8gl5d"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.100522 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-865bc47cdb-8gl5d" event={"ID":"0be02be5-bffe-44a9-8209-2eca5cba3e8a","Type":"ContainerDied","Data":"02cbeea1c123942e741bc9b846440134dc0da37d34e983c04011e3b38f623564"}
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.100566 4949 scope.go:117] "RemoveContainer" containerID="908214e164d5a66c733ded1317f54021fe06b8e99fcae559dc4fd6850c915c91"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.124923 4949 generic.go:334] "Generic (PLEG): container finished" podID="7e4a6cf5-3cdc-45de-964d-cb39392b09a3" containerID="0d3f5c7a6154ed05cbcbf7ed777f330028a143a6b23a5f89b40b704192f379e1" exitCode=0
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.125204 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rzk2z" event={"ID":"7e4a6cf5-3cdc-45de-964d-cb39392b09a3","Type":"ContainerDied","Data":"0d3f5c7a6154ed05cbcbf7ed777f330028a143a6b23a5f89b40b704192f379e1"}
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.132371 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-fb497bdbb-l9z7g" event={"ID":"15506987-5e2a-48f1-b631-b602d36397da","Type":"ContainerDied","Data":"1571689ae33e4ecfebccd9c0ae9ebeb71bedae250d097c09cf00d2ea0ba42b0e"}
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.132872 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-fb497bdbb-l9z7g"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.144443 4949 generic.go:334] "Generic (PLEG): container finished" podID="67b23a8b-bf6c-4d65-9359-7ba9ffe71216" containerID="79df4687b09bfc583f1dd9e43f57aa8f864680d6d556b8eaa34b443b25d597bf" exitCode=0
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.145875 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ccdhx" event={"ID":"67b23a8b-bf6c-4d65-9359-7ba9ffe71216","Type":"ContainerDied","Data":"79df4687b09bfc583f1dd9e43f57aa8f864680d6d556b8eaa34b443b25d597bf"}
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.148721 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ccdhx"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.158337 4949 scope.go:117] "RemoveContainer" containerID="3ed6f76cbdfe7b6a7e947510f05508a720f90ea28f779426af0879b34b2d5f82"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.217159 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-fb497bdbb-l9z7g"]
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.223318 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-fb497bdbb-l9z7g"]
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.241805 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67b23a8b-bf6c-4d65-9359-7ba9ffe71216-utilities\") pod \"67b23a8b-bf6c-4d65-9359-7ba9ffe71216\" (UID: \"67b23a8b-bf6c-4d65-9359-7ba9ffe71216\") "
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.241885 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g6zwb\" (UniqueName: \"kubernetes.io/projected/67b23a8b-bf6c-4d65-9359-7ba9ffe71216-kube-api-access-g6zwb\") pod \"67b23a8b-bf6c-4d65-9359-7ba9ffe71216\" (UID: \"67b23a8b-bf6c-4d65-9359-7ba9ffe71216\") "
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.241984 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67b23a8b-bf6c-4d65-9359-7ba9ffe71216-catalog-content\") pod \"67b23a8b-bf6c-4d65-9359-7ba9ffe71216\" (UID: \"67b23a8b-bf6c-4d65-9359-7ba9ffe71216\") "
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.249051 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/67b23a8b-bf6c-4d65-9359-7ba9ffe71216-utilities" (OuterVolumeSpecName: "utilities") pod "67b23a8b-bf6c-4d65-9359-7ba9ffe71216" (UID: "67b23a8b-bf6c-4d65-9359-7ba9ffe71216"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.259570 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="15506987-5e2a-48f1-b631-b602d36397da" path="/var/lib/kubelet/pods/15506987-5e2a-48f1-b631-b602d36397da/volumes"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.266658 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/67b23a8b-bf6c-4d65-9359-7ba9ffe71216-kube-api-access-g6zwb" (OuterVolumeSpecName: "kube-api-access-g6zwb") pod "67b23a8b-bf6c-4d65-9359-7ba9ffe71216" (UID: "67b23a8b-bf6c-4d65-9359-7ba9ffe71216"). InnerVolumeSpecName "kube-api-access-g6zwb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.267700 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-865bc47cdb-8gl5d"]
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.267733 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-865bc47cdb-8gl5d"]
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.267750 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-6f6bc6567b-n4jq2"]
Feb 16 11:10:37 crc kubenswrapper[4949]: E0216 11:10:37.267994 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0be02be5-bffe-44a9-8209-2eca5cba3e8a" containerName="route-controller-manager"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.268007 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="0be02be5-bffe-44a9-8209-2eca5cba3e8a" containerName="route-controller-manager"
Feb 16 11:10:37 crc kubenswrapper[4949]: E0216 11:10:37.268019 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67b23a8b-bf6c-4d65-9359-7ba9ffe71216" containerName="extract-utilities"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.268025 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="67b23a8b-bf6c-4d65-9359-7ba9ffe71216" containerName="extract-utilities"
Feb 16 11:10:37 crc kubenswrapper[4949]: E0216 11:10:37.268037 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5108144a-c7ec-4cd4-b792-eb6a943dce19" containerName="extract-utilities"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.268045 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="5108144a-c7ec-4cd4-b792-eb6a943dce19" containerName="extract-utilities"
Feb 16 11:10:37 crc kubenswrapper[4949]: E0216 11:10:37.268059 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67b23a8b-bf6c-4d65-9359-7ba9ffe71216" containerName="extract-content"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.268066 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="67b23a8b-bf6c-4d65-9359-7ba9ffe71216" containerName="extract-content"
Feb 16 11:10:37 crc kubenswrapper[4949]: E0216 11:10:37.268073 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67b23a8b-bf6c-4d65-9359-7ba9ffe71216" containerName="registry-server"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.268079 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="67b23a8b-bf6c-4d65-9359-7ba9ffe71216" containerName="registry-server"
Feb 16 11:10:37 crc kubenswrapper[4949]: E0216 11:10:37.268089 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15506987-5e2a-48f1-b631-b602d36397da" containerName="controller-manager"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.268095 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="15506987-5e2a-48f1-b631-b602d36397da" containerName="controller-manager"
Feb 16 11:10:37 crc kubenswrapper[4949]: E0216 11:10:37.268103 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5108144a-c7ec-4cd4-b792-eb6a943dce19" containerName="extract-content"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.268110 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="5108144a-c7ec-4cd4-b792-eb6a943dce19" containerName="extract-content"
Feb 16 11:10:37 crc kubenswrapper[4949]: E0216 11:10:37.268124 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5108144a-c7ec-4cd4-b792-eb6a943dce19" containerName="registry-server"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.268130 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="5108144a-c7ec-4cd4-b792-eb6a943dce19" containerName="registry-server"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.268240 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="15506987-5e2a-48f1-b631-b602d36397da" containerName="controller-manager"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.268256 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="5108144a-c7ec-4cd4-b792-eb6a943dce19" containerName="registry-server"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.268266 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="67b23a8b-bf6c-4d65-9359-7ba9ffe71216" containerName="registry-server"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.268278 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="0be02be5-bffe-44a9-8209-2eca5cba3e8a" containerName="route-controller-manager"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.273051 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6684df87d8-xmfdz"]
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.276284 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6684df87d8-xmfdz"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.276377 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6f6bc6567b-n4jq2"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.278883 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6f6bc6567b-n4jq2"]
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.281422 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.281656 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.282023 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.282456 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.288537 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.288755 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.288860 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.288950 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.289129 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.289390 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.289494 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.289911 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.292534 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.302801 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6684df87d8-xmfdz"]
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.320736 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/67b23a8b-bf6c-4d65-9359-7ba9ffe71216-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "67b23a8b-bf6c-4d65-9359-7ba9ffe71216" (UID: "67b23a8b-bf6c-4d65-9359-7ba9ffe71216"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.344236 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5d25d6d8-fad9-4f7e-a6a5-53588c7202f2-serving-cert\") pod \"route-controller-manager-6684df87d8-xmfdz\" (UID: \"5d25d6d8-fad9-4f7e-a6a5-53588c7202f2\") " pod="openshift-route-controller-manager/route-controller-manager-6684df87d8-xmfdz"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.344383 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6kdrd\" (UniqueName: \"kubernetes.io/projected/5d25d6d8-fad9-4f7e-a6a5-53588c7202f2-kube-api-access-6kdrd\") pod \"route-controller-manager-6684df87d8-xmfdz\" (UID: \"5d25d6d8-fad9-4f7e-a6a5-53588c7202f2\") " pod="openshift-route-controller-manager/route-controller-manager-6684df87d8-xmfdz"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.344650 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5d25d6d8-fad9-4f7e-a6a5-53588c7202f2-client-ca\") pod \"route-controller-manager-6684df87d8-xmfdz\" (UID: \"5d25d6d8-fad9-4f7e-a6a5-53588c7202f2\") " pod="openshift-route-controller-manager/route-controller-manager-6684df87d8-xmfdz"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.344699 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f2ed4bad-d9f4-4c35-871d-e528432aad5e-serving-cert\") pod \"controller-manager-6f6bc6567b-n4jq2\" (UID: \"f2ed4bad-d9f4-4c35-871d-e528432aad5e\") " pod="openshift-controller-manager/controller-manager-6f6bc6567b-n4jq2"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.344756 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f2ed4bad-d9f4-4c35-871d-e528432aad5e-proxy-ca-bundles\") pod \"controller-manager-6f6bc6567b-n4jq2\" (UID: \"f2ed4bad-d9f4-4c35-871d-e528432aad5e\") " pod="openshift-controller-manager/controller-manager-6f6bc6567b-n4jq2"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.344860 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5d25d6d8-fad9-4f7e-a6a5-53588c7202f2-config\") pod \"route-controller-manager-6684df87d8-xmfdz\" (UID: \"5d25d6d8-fad9-4f7e-a6a5-53588c7202f2\") " pod="openshift-route-controller-manager/route-controller-manager-6684df87d8-xmfdz"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.344897 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qhwp5\" (UniqueName: \"kubernetes.io/projected/f2ed4bad-d9f4-4c35-871d-e528432aad5e-kube-api-access-qhwp5\") pod \"controller-manager-6f6bc6567b-n4jq2\" (UID: \"f2ed4bad-d9f4-4c35-871d-e528432aad5e\") " pod="openshift-controller-manager/controller-manager-6f6bc6567b-n4jq2"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.344927 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f2ed4bad-d9f4-4c35-871d-e528432aad5e-config\") pod \"controller-manager-6f6bc6567b-n4jq2\" (UID: \"f2ed4bad-d9f4-4c35-871d-e528432aad5e\") " pod="openshift-controller-manager/controller-manager-6f6bc6567b-n4jq2"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.345002 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f2ed4bad-d9f4-4c35-871d-e528432aad5e-client-ca\") pod \"controller-manager-6f6bc6567b-n4jq2\" (UID: \"f2ed4bad-d9f4-4c35-871d-e528432aad5e\") " pod="openshift-controller-manager/controller-manager-6f6bc6567b-n4jq2"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.345068 4949 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67b23a8b-bf6c-4d65-9359-7ba9ffe71216-catalog-content\") on node \"crc\" DevicePath \"\""
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.345091 4949 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67b23a8b-bf6c-4d65-9359-7ba9ffe71216-utilities\") on node \"crc\" DevicePath \"\""
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.345128 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g6zwb\" (UniqueName: \"kubernetes.io/projected/67b23a8b-bf6c-4d65-9359-7ba9ffe71216-kube-api-access-g6zwb\") on node \"crc\" DevicePath \"\""
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.366047 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rzk2z"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.446205 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7e4a6cf5-3cdc-45de-964d-cb39392b09a3-catalog-content\") pod \"7e4a6cf5-3cdc-45de-964d-cb39392b09a3\" (UID: \"7e4a6cf5-3cdc-45de-964d-cb39392b09a3\") "
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.446304 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqqzr\" (UniqueName: \"kubernetes.io/projected/7e4a6cf5-3cdc-45de-964d-cb39392b09a3-kube-api-access-fqqzr\") pod \"7e4a6cf5-3cdc-45de-964d-cb39392b09a3\" (UID: \"7e4a6cf5-3cdc-45de-964d-cb39392b09a3\") "
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.446520 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7e4a6cf5-3cdc-45de-964d-cb39392b09a3-utilities\") pod \"7e4a6cf5-3cdc-45de-964d-cb39392b09a3\" (UID: \"7e4a6cf5-3cdc-45de-964d-cb39392b09a3\") "
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.446732 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6kdrd\" (UniqueName: \"kubernetes.io/projected/5d25d6d8-fad9-4f7e-a6a5-53588c7202f2-kube-api-access-6kdrd\") pod \"route-controller-manager-6684df87d8-xmfdz\" (UID: \"5d25d6d8-fad9-4f7e-a6a5-53588c7202f2\") " pod="openshift-route-controller-manager/route-controller-manager-6684df87d8-xmfdz"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.446794 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5d25d6d8-fad9-4f7e-a6a5-53588c7202f2-client-ca\") pod \"route-controller-manager-6684df87d8-xmfdz\" (UID: \"5d25d6d8-fad9-4f7e-a6a5-53588c7202f2\") " pod="openshift-route-controller-manager/route-controller-manager-6684df87d8-xmfdz"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.446826 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f2ed4bad-d9f4-4c35-871d-e528432aad5e-serving-cert\") pod \"controller-manager-6f6bc6567b-n4jq2\" (UID: \"f2ed4bad-d9f4-4c35-871d-e528432aad5e\") " pod="openshift-controller-manager/controller-manager-6f6bc6567b-n4jq2"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.446861 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f2ed4bad-d9f4-4c35-871d-e528432aad5e-proxy-ca-bundles\") pod \"controller-manager-6f6bc6567b-n4jq2\" (UID: \"f2ed4bad-d9f4-4c35-871d-e528432aad5e\") " pod="openshift-controller-manager/controller-manager-6f6bc6567b-n4jq2"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.446910 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5d25d6d8-fad9-4f7e-a6a5-53588c7202f2-config\") pod \"route-controller-manager-6684df87d8-xmfdz\" (UID: \"5d25d6d8-fad9-4f7e-a6a5-53588c7202f2\") " pod="openshift-route-controller-manager/route-controller-manager-6684df87d8-xmfdz"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.446935 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qhwp5\" (UniqueName: \"kubernetes.io/projected/f2ed4bad-d9f4-4c35-871d-e528432aad5e-kube-api-access-qhwp5\") pod \"controller-manager-6f6bc6567b-n4jq2\" (UID: \"f2ed4bad-d9f4-4c35-871d-e528432aad5e\") " pod="openshift-controller-manager/controller-manager-6f6bc6567b-n4jq2"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.446960 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f2ed4bad-d9f4-4c35-871d-e528432aad5e-config\") pod \"controller-manager-6f6bc6567b-n4jq2\" (UID: \"f2ed4bad-d9f4-4c35-871d-e528432aad5e\") " pod="openshift-controller-manager/controller-manager-6f6bc6567b-n4jq2"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.446998 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f2ed4bad-d9f4-4c35-871d-e528432aad5e-client-ca\") pod \"controller-manager-6f6bc6567b-n4jq2\" (UID: \"f2ed4bad-d9f4-4c35-871d-e528432aad5e\") " pod="openshift-controller-manager/controller-manager-6f6bc6567b-n4jq2"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.447027 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5d25d6d8-fad9-4f7e-a6a5-53588c7202f2-serving-cert\") pod \"route-controller-manager-6684df87d8-xmfdz\" (UID: \"5d25d6d8-fad9-4f7e-a6a5-53588c7202f2\") " pod="openshift-route-controller-manager/route-controller-manager-6684df87d8-xmfdz"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.449708 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7e4a6cf5-3cdc-45de-964d-cb39392b09a3-utilities" (OuterVolumeSpecName: "utilities") pod "7e4a6cf5-3cdc-45de-964d-cb39392b09a3" (UID: "7e4a6cf5-3cdc-45de-964d-cb39392b09a3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.450071 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f2ed4bad-d9f4-4c35-871d-e528432aad5e-proxy-ca-bundles\") pod \"controller-manager-6f6bc6567b-n4jq2\" (UID: \"f2ed4bad-d9f4-4c35-871d-e528432aad5e\") " pod="openshift-controller-manager/controller-manager-6f6bc6567b-n4jq2"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.450215 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5d25d6d8-fad9-4f7e-a6a5-53588c7202f2-config\") pod \"route-controller-manager-6684df87d8-xmfdz\" (UID: \"5d25d6d8-fad9-4f7e-a6a5-53588c7202f2\") " pod="openshift-route-controller-manager/route-controller-manager-6684df87d8-xmfdz"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.450503 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5d25d6d8-fad9-4f7e-a6a5-53588c7202f2-client-ca\") pod \"route-controller-manager-6684df87d8-xmfdz\" (UID: \"5d25d6d8-fad9-4f7e-a6a5-53588c7202f2\") " pod="openshift-route-controller-manager/route-controller-manager-6684df87d8-xmfdz"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.451039 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f2ed4bad-d9f4-4c35-871d-e528432aad5e-config\") pod \"controller-manager-6f6bc6567b-n4jq2\" (UID: \"f2ed4bad-d9f4-4c35-871d-e528432aad5e\") " pod="openshift-controller-manager/controller-manager-6f6bc6567b-n4jq2"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.451808 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f2ed4bad-d9f4-4c35-871d-e528432aad5e-client-ca\") pod \"controller-manager-6f6bc6567b-n4jq2\" (UID: \"f2ed4bad-d9f4-4c35-871d-e528432aad5e\") " pod="openshift-controller-manager/controller-manager-6f6bc6567b-n4jq2"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.455844 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5d25d6d8-fad9-4f7e-a6a5-53588c7202f2-serving-cert\") pod \"route-controller-manager-6684df87d8-xmfdz\" (UID: \"5d25d6d8-fad9-4f7e-a6a5-53588c7202f2\") " pod="openshift-route-controller-manager/route-controller-manager-6684df87d8-xmfdz"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.456103 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e4a6cf5-3cdc-45de-964d-cb39392b09a3-kube-api-access-fqqzr" (OuterVolumeSpecName: "kube-api-access-fqqzr") pod "7e4a6cf5-3cdc-45de-964d-cb39392b09a3" (UID: "7e4a6cf5-3cdc-45de-964d-cb39392b09a3"). InnerVolumeSpecName "kube-api-access-fqqzr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.471031 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f2ed4bad-d9f4-4c35-871d-e528432aad5e-serving-cert\") pod \"controller-manager-6f6bc6567b-n4jq2\" (UID: \"f2ed4bad-d9f4-4c35-871d-e528432aad5e\") " pod="openshift-controller-manager/controller-manager-6f6bc6567b-n4jq2"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.475968 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qhwp5\" (UniqueName: \"kubernetes.io/projected/f2ed4bad-d9f4-4c35-871d-e528432aad5e-kube-api-access-qhwp5\") pod \"controller-manager-6f6bc6567b-n4jq2\" (UID: \"f2ed4bad-d9f4-4c35-871d-e528432aad5e\") " pod="openshift-controller-manager/controller-manager-6f6bc6567b-n4jq2"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.476269 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6kdrd\" (UniqueName: \"kubernetes.io/projected/5d25d6d8-fad9-4f7e-a6a5-53588c7202f2-kube-api-access-6kdrd\") pod \"route-controller-manager-6684df87d8-xmfdz\" (UID: \"5d25d6d8-fad9-4f7e-a6a5-53588c7202f2\") " pod="openshift-route-controller-manager/route-controller-manager-6684df87d8-xmfdz"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.549106 4949 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7e4a6cf5-3cdc-45de-964d-cb39392b09a3-utilities\") on node \"crc\" DevicePath \"\""
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.549154 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqqzr\" (UniqueName: \"kubernetes.io/projected/7e4a6cf5-3cdc-45de-964d-cb39392b09a3-kube-api-access-fqqzr\") on node \"crc\" DevicePath \"\""
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.585717 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7e4a6cf5-3cdc-45de-964d-cb39392b09a3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7e4a6cf5-3cdc-45de-964d-cb39392b09a3" (UID: "7e4a6cf5-3cdc-45de-964d-cb39392b09a3"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.623911 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6f6bc6567b-n4jq2"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.631815 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6684df87d8-xmfdz"
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.651161 4949 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7e4a6cf5-3cdc-45de-964d-cb39392b09a3-catalog-content\") on node \"crc\" DevicePath \"\""
Feb 16 11:10:37 crc kubenswrapper[4949]: I0216 11:10:37.982732 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6f6bc6567b-n4jq2"]
Feb 16 11:10:38 crc kubenswrapper[4949]: I0216 11:10:38.116268 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6684df87d8-xmfdz"]
Feb 16 11:10:38 crc kubenswrapper[4949]: W0216 11:10:38.125405 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5d25d6d8_fad9_4f7e_a6a5_53588c7202f2.slice/crio-84a1321e1f2e9be937fc672a469bab0a872543126bb79e02317693213e18ebc9 WatchSource:0}: Error finding container 84a1321e1f2e9be937fc672a469bab0a872543126bb79e02317693213e18ebc9: Status 404 returned error can't find the container with id 84a1321e1f2e9be937fc672a469bab0a872543126bb79e02317693213e18ebc9
Feb 16 11:10:38 crc kubenswrapper[4949]: I0216 11:10:38.155772 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6f6bc6567b-n4jq2" event={"ID":"f2ed4bad-d9f4-4c35-871d-e528432aad5e","Type":"ContainerStarted","Data":"99c298e34c9cbd412b0770dff0b8901ddbdabe50b4356e91072e7b3fdc7e9dd5"}
Feb 16 11:10:38 crc kubenswrapper[4949]: I0216 11:10:38.166530 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rzk2z" event={"ID":"7e4a6cf5-3cdc-45de-964d-cb39392b09a3","Type":"ContainerDied","Data":"373c2113acfe58cd8bc37feaa8fdf4d01b10b6e37d9833166268bd965c4d8217"}
Feb 16 11:10:38 crc kubenswrapper[4949]: I0216 11:10:38.166616 4949 scope.go:117] "RemoveContainer" containerID="0d3f5c7a6154ed05cbcbf7ed777f330028a143a6b23a5f89b40b704192f379e1"
Feb 16 11:10:38 crc kubenswrapper[4949]: I0216 11:10:38.166781 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rzk2z"
Feb 16 11:10:38 crc kubenswrapper[4949]: I0216 11:10:38.182020 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ccdhx" event={"ID":"67b23a8b-bf6c-4d65-9359-7ba9ffe71216","Type":"ContainerDied","Data":"d4a5934ee78d299f172ca87798a5d501a4dc4e8a898301f4b2ec431585113b1e"}
Feb 16 11:10:38 crc kubenswrapper[4949]: I0216 11:10:38.182117 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ccdhx"
Feb 16 11:10:38 crc kubenswrapper[4949]: I0216 11:10:38.183124 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6684df87d8-xmfdz" event={"ID":"5d25d6d8-fad9-4f7e-a6a5-53588c7202f2","Type":"ContainerStarted","Data":"84a1321e1f2e9be937fc672a469bab0a872543126bb79e02317693213e18ebc9"}
Feb 16 11:10:38 crc kubenswrapper[4949]: I0216 11:10:38.208958 4949 scope.go:117] "RemoveContainer" containerID="713c73e9ae1c0397ee0001441f8c155b48da5394dec962cd5397c20d351a6dc8"
Feb 16 11:10:38 crc kubenswrapper[4949]: I0216 11:10:38.217253 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rzk2z"]
Feb 16 11:10:38 crc kubenswrapper[4949]: I0216 11:10:38.226467 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-rzk2z"]
Feb 16 11:10:38 crc kubenswrapper[4949]: I0216 11:10:38.240910 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ccdhx"]
Feb 16 11:10:38 crc kubenswrapper[4949]: I0216 11:10:38.245685 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-ccdhx"]
Feb 16 11:10:38 crc kubenswrapper[4949]: I0216 11:10:38.246055 4949 scope.go:117] "RemoveContainer" containerID="9e01da32733c13e7ed0fa15e674c06ed6382fc07111b0a4c3503f2028ff15ecd"
Feb 16 11:10:38 crc kubenswrapper[4949]: I0216 11:10:38.265061 4949 scope.go:117] "RemoveContainer" containerID="79df4687b09bfc583f1dd9e43f57aa8f864680d6d556b8eaa34b443b25d597bf"
Feb 16 11:10:38 crc kubenswrapper[4949]: I0216 11:10:38.281315 4949 scope.go:117] "RemoveContainer" containerID="05368bb10d7d7deb2a4fe5170709b0f721fc7b58fe7f885e5cf252778d0a5f81"
Feb 16 11:10:38 crc kubenswrapper[4949]: I0216 11:10:38.297123 4949 scope.go:117] "RemoveContainer" containerID="1a98d63d4db63c567746e3b28866fdf9e07ab1bdced169aa4098d558953d47d1"
Feb 16 11:10:39 crc kubenswrapper[4949]: I0216 11:10:39.191772 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6f6bc6567b-n4jq2" event={"ID":"f2ed4bad-d9f4-4c35-871d-e528432aad5e","Type":"ContainerStarted","Data":"5e3bbc36f689d07b01dbc7b5d11dfd86fdbd33c93ce16e030c19bd2e1352ebf7"}
Feb 16 11:10:39 crc kubenswrapper[4949]: I0216 11:10:39.192048 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-6f6bc6567b-n4jq2"
Feb 16 11:10:39 crc kubenswrapper[4949]: I0216 11:10:39.196189 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6684df87d8-xmfdz" event={"ID":"5d25d6d8-fad9-4f7e-a6a5-53588c7202f2","Type":"ContainerStarted","Data":"711cf45ad2eddd637724a09cc1f6e37545849d344ea7c72dbea5b1d04fa36117"}
Feb 16 11:10:39 crc kubenswrapper[4949]: I0216 11:10:39.196490 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6684df87d8-xmfdz"
Feb 16 11:10:39 crc kubenswrapper[4949]: I0216 11:10:39.200932 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-6f6bc6567b-n4jq2"
Feb 16 11:10:39 crc kubenswrapper[4949]: I0216 11:10:39.201770 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6684df87d8-xmfdz"
Feb 16 11:10:39 crc kubenswrapper[4949]: I0216 11:10:39.217945 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-6f6bc6567b-n4jq2" podStartSLOduration=4.217917803 podStartE2EDuration="4.217917803s" podCreationTimestamp="2026-02-16 11:10:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:10:39.215189722 +0000 UTC m=+228.844523897" watchObservedRunningTime="2026-02-16 11:10:39.217917803 +0000 UTC m=+228.847251968"
Feb 16 11:10:39 crc kubenswrapper[4949]: I0216 11:10:39.236758 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6684df87d8-xmfdz" podStartSLOduration=4.236728024 podStartE2EDuration="4.236728024s" podCreationTimestamp="2026-02-16 11:10:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:10:39.234977272 +0000 UTC m=+228.864311437" watchObservedRunningTime="2026-02-16 11:10:39.236728024 +0000 UTC m=+228.866062189"
Feb 16 11:10:39 crc kubenswrapper[4949]: I0216 11:10:39.245132 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0be02be5-bffe-44a9-8209-2eca5cba3e8a" path="/var/lib/kubelet/pods/0be02be5-bffe-44a9-8209-2eca5cba3e8a/volumes"
Feb 16 11:10:39 crc kubenswrapper[4949]: I0216 11:10:39.246251 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="67b23a8b-bf6c-4d65-9359-7ba9ffe71216" path="/var/lib/kubelet/pods/67b23a8b-bf6c-4d65-9359-7ba9ffe71216/volumes"
Feb 16 11:10:39 crc kubenswrapper[4949]: I0216 11:10:39.247161 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7e4a6cf5-3cdc-45de-964d-cb39392b09a3" path="/var/lib/kubelet/pods/7e4a6cf5-3cdc-45de-964d-cb39392b09a3/volumes"
Feb 16 11:10:40 crc kubenswrapper[4949]: I0216 11:10:40.610584 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-wnqwb"
Feb 16 11:10:43 crc kubenswrapper[4949]: I0216 11:10:43.283515 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wnqwb"]
Feb 16 11:10:43 crc kubenswrapper[4949]: I0216 11:10:43.284036 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-wnqwb" podUID="d8668558-9b23-4195-9816-7f9034a699e8" containerName="registry-server" containerID="cri-o://8ddca7a45bff667fb83f95c9bdac56e9ee7820f1030845867111899a1a10c420" gracePeriod=2
Feb 16 11:10:43 crc kubenswrapper[4949]: E0216 11:10:43.436078 4949 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd8668558_9b23_4195_9816_7f9034a699e8.slice/crio-8ddca7a45bff667fb83f95c9bdac56e9ee7820f1030845867111899a1a10c420.scope\": RecentStats: unable to find data in memory cache]"
Feb 16 11:10:43 crc kubenswrapper[4949]: I0216 11:10:43.879720 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wnqwb"
Feb 16 11:10:43 crc kubenswrapper[4949]: I0216 11:10:43.967800 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5f5w2\" (UniqueName: \"kubernetes.io/projected/d8668558-9b23-4195-9816-7f9034a699e8-kube-api-access-5f5w2\") pod \"d8668558-9b23-4195-9816-7f9034a699e8\" (UID: \"d8668558-9b23-4195-9816-7f9034a699e8\") "
Feb 16 11:10:43 crc kubenswrapper[4949]: I0216 11:10:43.968006 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8668558-9b23-4195-9816-7f9034a699e8-utilities\") pod \"d8668558-9b23-4195-9816-7f9034a699e8\" (UID: \"d8668558-9b23-4195-9816-7f9034a699e8\") "
Feb 16 11:10:43 crc kubenswrapper[4949]: I0216 11:10:43.968054 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8668558-9b23-4195-9816-7f9034a699e8-catalog-content\") pod \"d8668558-9b23-4195-9816-7f9034a699e8\" (UID: \"d8668558-9b23-4195-9816-7f9034a699e8\") "
Feb 16 11:10:43 crc kubenswrapper[4949]: I0216 11:10:43.969471 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d8668558-9b23-4195-9816-7f9034a699e8-utilities" (OuterVolumeSpecName: "utilities") pod "d8668558-9b23-4195-9816-7f9034a699e8" (UID: "d8668558-9b23-4195-9816-7f9034a699e8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Feb 16 11:10:43 crc kubenswrapper[4949]: I0216 11:10:43.973980 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8668558-9b23-4195-9816-7f9034a699e8-kube-api-access-5f5w2" (OuterVolumeSpecName: "kube-api-access-5f5w2") pod "d8668558-9b23-4195-9816-7f9034a699e8" (UID: "d8668558-9b23-4195-9816-7f9034a699e8"). InnerVolumeSpecName "kube-api-access-5f5w2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Feb 16 11:10:44 crc kubenswrapper[4949]: I0216 11:10:44.023145 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d8668558-9b23-4195-9816-7f9034a699e8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d8668558-9b23-4195-9816-7f9034a699e8" (UID: "d8668558-9b23-4195-9816-7f9034a699e8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Feb 16 11:10:44 crc kubenswrapper[4949]: I0216 11:10:44.071009 4949 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8668558-9b23-4195-9816-7f9034a699e8-utilities\") on node \"crc\" DevicePath \"\""
Feb 16 11:10:44 crc kubenswrapper[4949]: I0216 11:10:44.071117 4949 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8668558-9b23-4195-9816-7f9034a699e8-catalog-content\") on node \"crc\" DevicePath \"\""
Feb 16 11:10:44 crc kubenswrapper[4949]: I0216 11:10:44.071139 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5f5w2\" (UniqueName: \"kubernetes.io/projected/d8668558-9b23-4195-9816-7f9034a699e8-kube-api-access-5f5w2\") on node \"crc\" DevicePath \"\""
Feb 16 11:10:44 crc kubenswrapper[4949]: I0216 11:10:44.234383 4949 generic.go:334] "Generic (PLEG): container finished" podID="d8668558-9b23-4195-9816-7f9034a699e8" containerID="8ddca7a45bff667fb83f95c9bdac56e9ee7820f1030845867111899a1a10c420" exitCode=0
Feb 16 11:10:44 crc kubenswrapper[4949]: I0216 11:10:44.234467 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wnqwb" event={"ID":"d8668558-9b23-4195-9816-7f9034a699e8","Type":"ContainerDied","Data":"8ddca7a45bff667fb83f95c9bdac56e9ee7820f1030845867111899a1a10c420"}
Feb 16 11:10:44 crc kubenswrapper[4949]: I0216 11:10:44.234517 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wnqwb"
Feb 16 11:10:44 crc kubenswrapper[4949]: I0216 11:10:44.234595 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wnqwb" event={"ID":"d8668558-9b23-4195-9816-7f9034a699e8","Type":"ContainerDied","Data":"9b1c7f98d327be6b58c2e7011b854b22cb132f1b1aa5d43fb3b63251a98896b7"}
Feb 16 11:10:44 crc kubenswrapper[4949]: I0216 11:10:44.234623 4949 scope.go:117] "RemoveContainer" containerID="8ddca7a45bff667fb83f95c9bdac56e9ee7820f1030845867111899a1a10c420"
Feb 16 11:10:44 crc kubenswrapper[4949]: I0216 11:10:44.273353 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wnqwb"]
Feb 16 11:10:44 crc kubenswrapper[4949]: I0216 11:10:44.275435 4949 scope.go:117] "RemoveContainer" containerID="a72c54aa85263bcb0aee2a04fc44c88c25230c61bce2583728e88f4881e6dd77"
Feb 16 11:10:44 crc kubenswrapper[4949]: I0216 11:10:44.276336 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-wnqwb"]
Feb 16 11:10:44 crc kubenswrapper[4949]: I0216 11:10:44.315494 4949 scope.go:117] "RemoveContainer" containerID="5f47c1d7893a103493b9c82a4cf23f467d14a83fb73b2944a174cb2b8afddb94"
Feb 16 11:10:44 crc kubenswrapper[4949]: I0216 11:10:44.331339 4949 scope.go:117] "RemoveContainer" containerID="8ddca7a45bff667fb83f95c9bdac56e9ee7820f1030845867111899a1a10c420"
Feb 16 11:10:44 crc kubenswrapper[4949]: E0216 11:10:44.331988 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8ddca7a45bff667fb83f95c9bdac56e9ee7820f1030845867111899a1a10c420\": container with ID starting with 8ddca7a45bff667fb83f95c9bdac56e9ee7820f1030845867111899a1a10c420 not found: ID does not exist" containerID="8ddca7a45bff667fb83f95c9bdac56e9ee7820f1030845867111899a1a10c420"
Feb 16 11:10:44 crc kubenswrapper[4949]: I0216 11:10:44.332056 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8ddca7a45bff667fb83f95c9bdac56e9ee7820f1030845867111899a1a10c420"} err="failed to get container status \"8ddca7a45bff667fb83f95c9bdac56e9ee7820f1030845867111899a1a10c420\": rpc error: code = NotFound desc = could not find container \"8ddca7a45bff667fb83f95c9bdac56e9ee7820f1030845867111899a1a10c420\": container with ID starting with 8ddca7a45bff667fb83f95c9bdac56e9ee7820f1030845867111899a1a10c420 not found: ID does not exist"
Feb 16 11:10:44 crc kubenswrapper[4949]: I0216 11:10:44.332101 4949 scope.go:117] "RemoveContainer" containerID="a72c54aa85263bcb0aee2a04fc44c88c25230c61bce2583728e88f4881e6dd77"
Feb 16 11:10:44 crc kubenswrapper[4949]: E0216 11:10:44.332992 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a72c54aa85263bcb0aee2a04fc44c88c25230c61bce2583728e88f4881e6dd77\": container with ID starting with a72c54aa85263bcb0aee2a04fc44c88c25230c61bce2583728e88f4881e6dd77 not found: ID does not exist" containerID="a72c54aa85263bcb0aee2a04fc44c88c25230c61bce2583728e88f4881e6dd77"
Feb 16 11:10:44 crc kubenswrapper[4949]: I0216 11:10:44.333035 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a72c54aa85263bcb0aee2a04fc44c88c25230c61bce2583728e88f4881e6dd77"} err="failed to get container status \"a72c54aa85263bcb0aee2a04fc44c88c25230c61bce2583728e88f4881e6dd77\": rpc error: code = NotFound desc = could not find container \"a72c54aa85263bcb0aee2a04fc44c88c25230c61bce2583728e88f4881e6dd77\": container with ID starting with a72c54aa85263bcb0aee2a04fc44c88c25230c61bce2583728e88f4881e6dd77 not found: ID does not exist"
Feb 16 11:10:44 crc kubenswrapper[4949]: I0216 11:10:44.333077 4949 scope.go:117] "RemoveContainer" containerID="5f47c1d7893a103493b9c82a4cf23f467d14a83fb73b2944a174cb2b8afddb94"
Feb 16 11:10:44 crc kubenswrapper[4949]: E0216 11:10:44.333443 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5f47c1d7893a103493b9c82a4cf23f467d14a83fb73b2944a174cb2b8afddb94\": container with ID starting with 5f47c1d7893a103493b9c82a4cf23f467d14a83fb73b2944a174cb2b8afddb94 not found: ID does not exist" containerID="5f47c1d7893a103493b9c82a4cf23f467d14a83fb73b2944a174cb2b8afddb94"
Feb 16 11:10:44 crc kubenswrapper[4949]: I0216 11:10:44.333471 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5f47c1d7893a103493b9c82a4cf23f467d14a83fb73b2944a174cb2b8afddb94"} err="failed to get container status \"5f47c1d7893a103493b9c82a4cf23f467d14a83fb73b2944a174cb2b8afddb94\": rpc error: code = NotFound desc = could not find container \"5f47c1d7893a103493b9c82a4cf23f467d14a83fb73b2944a174cb2b8afddb94\": container with ID starting with 5f47c1d7893a103493b9c82a4cf23f467d14a83fb73b2944a174cb2b8afddb94 not found: ID does not exist"
Feb 16 11:10:45 crc kubenswrapper[4949]: I0216 11:10:45.242666 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d8668558-9b23-4195-9816-7f9034a699e8" path="/var/lib/kubelet/pods/d8668558-9b23-4195-9816-7f9034a699e8/volumes"
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.735830 4949 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"]
Feb 16 11:10:47 crc kubenswrapper[4949]: E0216 11:10:47.736230 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e4a6cf5-3cdc-45de-964d-cb39392b09a3" containerName="extract-content"
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.736247 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e4a6cf5-3cdc-45de-964d-cb39392b09a3" containerName="extract-content"
Feb 16 11:10:47 crc kubenswrapper[4949]: E0216 11:10:47.736260 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8668558-9b23-4195-9816-7f9034a699e8" containerName="registry-server"
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.736266 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8668558-9b23-4195-9816-7f9034a699e8" containerName="registry-server"
Feb 16 11:10:47 crc kubenswrapper[4949]: E0216 11:10:47.736285 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8668558-9b23-4195-9816-7f9034a699e8" containerName="extract-content"
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.736292 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8668558-9b23-4195-9816-7f9034a699e8" containerName="extract-content"
Feb 16 11:10:47 crc kubenswrapper[4949]: E0216 11:10:47.736317 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e4a6cf5-3cdc-45de-964d-cb39392b09a3" containerName="extract-utilities"
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.736324 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e4a6cf5-3cdc-45de-964d-cb39392b09a3" containerName="extract-utilities"
Feb 16 11:10:47 crc kubenswrapper[4949]: E0216 11:10:47.736334 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8668558-9b23-4195-9816-7f9034a699e8" containerName="extract-utilities"
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.736341 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8668558-9b23-4195-9816-7f9034a699e8" containerName="extract-utilities"
Feb 16 11:10:47 crc kubenswrapper[4949]: E0216 11:10:47.736348 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e4a6cf5-3cdc-45de-964d-cb39392b09a3" containerName="registry-server"
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.736354 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e4a6cf5-3cdc-45de-964d-cb39392b09a3" containerName="registry-server"
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.736464 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8668558-9b23-4195-9816-7f9034a699e8" containerName="registry-server"
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.736476 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e4a6cf5-3cdc-45de-964d-cb39392b09a3" containerName="registry-server"
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.737003 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.777989 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"]
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.820558 4949 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.820886 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd" gracePeriod=15
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.821039 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://cb4e6265ce5aefd48ee356fb002a61e38aed11e42968db8945ffeaba1d7ee3c9" gracePeriod=15
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.821091 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49" gracePeriod=15
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.821123 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec" gracePeriod=15
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.821152 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809" gracePeriod=15
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.823188 4949 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Feb 16 11:10:47 crc kubenswrapper[4949]: E0216 11:10:47.823557 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.823582 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Feb 16 11:10:47 crc kubenswrapper[4949]: E0216 11:10:47.823597 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup"
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.823609 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup"
Feb 16 11:10:47 crc kubenswrapper[4949]: E0216 11:10:47.823634 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.823643 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Feb 16 11:10:47 crc kubenswrapper[4949]: E0216 11:10:47.823657 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller"
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.823668 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller"
Feb 16 11:10:47 crc kubenswrapper[4949]: E0216 11:10:47.823684 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer"
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.823694 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer"
Feb 16 11:10:47 crc kubenswrapper[4949]: E0216 11:10:47.823707 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz"
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.823715 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz"
Feb 16 11:10:47 crc kubenswrapper[4949]: E0216 11:10:47.823727 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver"
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.823736 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver"
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.823875 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller"
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.823902 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz"
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.823918 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.823929 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer"
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.823939 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.823951 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.823966 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver"
Feb 16 11:10:47 crc kubenswrapper[4949]: E0216 11:10:47.824108 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.824118 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.840197 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.840277 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.840316 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.840350 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.840411 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.840535 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.840570 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.840614 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.941718 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.941837 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.941900 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.941914 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.941971 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.941993 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.942052 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.942066 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.942126 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.942136 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.942160 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.942210 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.942423 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.942514 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.942519 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Feb 16 11:10:47 crc kubenswrapper[4949]: I0216 11:10:47.943327 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Feb 16 11:10:48 crc kubenswrapper[4949]: I0216 11:10:48.076381 4949 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 16 11:10:48 crc kubenswrapper[4949]: W0216 11:10:48.101797 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf85e55b1a89d02b0cb034b1ea31ed45a.slice/crio-2bf6d4ca1ab0c745f45d009035bce30b0bc2343fe8918889b2049767c7364947 WatchSource:0}: Error finding container 2bf6d4ca1ab0c745f45d009035bce30b0bc2343fe8918889b2049767c7364947: Status 404 returned error can't find the container with id 2bf6d4ca1ab0c745f45d009035bce30b0bc2343fe8918889b2049767c7364947 Feb 16 11:10:48 crc kubenswrapper[4949]: E0216 11:10:48.106539 4949 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.129:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.1894b59f08319817 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-16 11:10:48.105342999 +0000 UTC m=+237.734677164,LastTimestamp:2026-02-16 11:10:48.105342999 +0000 UTC m=+237.734677164,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 16 11:10:48 crc kubenswrapper[4949]: I0216 11:10:48.266111 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"2bf6d4ca1ab0c745f45d009035bce30b0bc2343fe8918889b2049767c7364947"} Feb 16 11:10:48 crc kubenswrapper[4949]: I0216 11:10:48.270412 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Feb 16 11:10:48 crc kubenswrapper[4949]: I0216 11:10:48.272037 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Feb 16 11:10:48 crc kubenswrapper[4949]: I0216 11:10:48.273057 4949 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="cb4e6265ce5aefd48ee356fb002a61e38aed11e42968db8945ffeaba1d7ee3c9" exitCode=0 Feb 16 11:10:48 crc kubenswrapper[4949]: I0216 11:10:48.273105 4949 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49" exitCode=0 Feb 16 11:10:48 crc kubenswrapper[4949]: I0216 11:10:48.273113 4949 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec" exitCode=0 Feb 16 11:10:48 crc kubenswrapper[4949]: I0216 11:10:48.273127 4949 generic.go:334] "Generic (PLEG): container finished" 
podID="f4b27818a5e8e43d0dc095d08835c792" containerID="b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809" exitCode=2 Feb 16 11:10:48 crc kubenswrapper[4949]: I0216 11:10:48.273183 4949 scope.go:117] "RemoveContainer" containerID="7b8ef73d310cce4e7c0bfbe5ac3dad7a26c8287f5edba07b8b6830d6d6011546" Feb 16 11:10:48 crc kubenswrapper[4949]: I0216 11:10:48.276273 4949 generic.go:334] "Generic (PLEG): container finished" podID="594902df-fdc6-4c21-9c22-8b4fd690d408" containerID="5e2152d0c3f27a0c146929394437a4b06579b4e491dae116c0d0edf0efcd60b9" exitCode=0 Feb 16 11:10:48 crc kubenswrapper[4949]: I0216 11:10:48.276308 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"594902df-fdc6-4c21-9c22-8b4fd690d408","Type":"ContainerDied","Data":"5e2152d0c3f27a0c146929394437a4b06579b4e491dae116c0d0edf0efcd60b9"} Feb 16 11:10:48 crc kubenswrapper[4949]: I0216 11:10:48.277450 4949 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:10:48 crc kubenswrapper[4949]: I0216 11:10:48.277744 4949 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:10:48 crc kubenswrapper[4949]: I0216 11:10:48.278093 4949 status_manager.go:851] "Failed to get status for pod" podUID="594902df-fdc6-4c21-9c22-8b4fd690d408" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:10:48 crc kubenswrapper[4949]: E0216 11:10:48.337266 4949 desired_state_of_world_populator.go:312] "Error processing volume" err="error processing PVC openshift-image-registry/crc-image-registry-storage: failed to fetch PVC from API server: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/persistentvolumeclaims/crc-image-registry-storage\": dial tcp 38.102.83.129:6443: connect: connection refused" pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" volumeName="registry-storage" Feb 16 11:10:48 crc kubenswrapper[4949]: I0216 11:10:48.527205 4949 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Feb 16 11:10:48 crc kubenswrapper[4949]: I0216 11:10:48.527264 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Feb 16 11:10:49 crc kubenswrapper[4949]: I0216 11:10:49.284964 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" 
event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"d3bc4461237a46bc4cf1654e7d5759f48b9c5b2756d32edc2e1874ac4c00de36"} Feb 16 11:10:49 crc kubenswrapper[4949]: I0216 11:10:49.286628 4949 status_manager.go:851] "Failed to get status for pod" podUID="594902df-fdc6-4c21-9c22-8b4fd690d408" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:10:49 crc kubenswrapper[4949]: I0216 11:10:49.287089 4949 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:10:49 crc kubenswrapper[4949]: I0216 11:10:49.289616 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Feb 16 11:10:49 crc kubenswrapper[4949]: I0216 11:10:49.705751 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Feb 16 11:10:49 crc kubenswrapper[4949]: I0216 11:10:49.707228 4949 status_manager.go:851] "Failed to get status for pod" podUID="594902df-fdc6-4c21-9c22-8b4fd690d408" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:10:49 crc kubenswrapper[4949]: I0216 11:10:49.707996 4949 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:10:49 crc kubenswrapper[4949]: I0216 11:10:49.773709 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/594902df-fdc6-4c21-9c22-8b4fd690d408-kubelet-dir\") pod \"594902df-fdc6-4c21-9c22-8b4fd690d408\" (UID: \"594902df-fdc6-4c21-9c22-8b4fd690d408\") " Feb 16 11:10:49 crc kubenswrapper[4949]: I0216 11:10:49.773808 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/594902df-fdc6-4c21-9c22-8b4fd690d408-var-lock\") pod \"594902df-fdc6-4c21-9c22-8b4fd690d408\" (UID: \"594902df-fdc6-4c21-9c22-8b4fd690d408\") " Feb 16 11:10:49 crc kubenswrapper[4949]: I0216 11:10:49.773839 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/594902df-fdc6-4c21-9c22-8b4fd690d408-kube-api-access\") pod \"594902df-fdc6-4c21-9c22-8b4fd690d408\" (UID: \"594902df-fdc6-4c21-9c22-8b4fd690d408\") " Feb 16 11:10:49 crc kubenswrapper[4949]: I0216 11:10:49.773881 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/594902df-fdc6-4c21-9c22-8b4fd690d408-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "594902df-fdc6-4c21-9c22-8b4fd690d408" (UID: 
"594902df-fdc6-4c21-9c22-8b4fd690d408"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 16 11:10:49 crc kubenswrapper[4949]: I0216 11:10:49.773957 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/594902df-fdc6-4c21-9c22-8b4fd690d408-var-lock" (OuterVolumeSpecName: "var-lock") pod "594902df-fdc6-4c21-9c22-8b4fd690d408" (UID: "594902df-fdc6-4c21-9c22-8b4fd690d408"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 16 11:10:49 crc kubenswrapper[4949]: I0216 11:10:49.774204 4949 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/594902df-fdc6-4c21-9c22-8b4fd690d408-kubelet-dir\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:49 crc kubenswrapper[4949]: I0216 11:10:49.774220 4949 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/594902df-fdc6-4c21-9c22-8b4fd690d408-var-lock\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:49 crc kubenswrapper[4949]: I0216 11:10:49.782985 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/594902df-fdc6-4c21-9c22-8b4fd690d408-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "594902df-fdc6-4c21-9c22-8b4fd690d408" (UID: "594902df-fdc6-4c21-9c22-8b4fd690d408"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:10:49 crc kubenswrapper[4949]: I0216 11:10:49.876462 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/594902df-fdc6-4c21-9c22-8b4fd690d408-kube-api-access\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:50 crc kubenswrapper[4949]: I0216 11:10:50.283937 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Feb 16 11:10:50 crc kubenswrapper[4949]: I0216 11:10:50.285488 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 16 11:10:50 crc kubenswrapper[4949]: I0216 11:10:50.287094 4949 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:10:50 crc kubenswrapper[4949]: I0216 11:10:50.287740 4949 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:10:50 crc kubenswrapper[4949]: I0216 11:10:50.288058 4949 status_manager.go:851] "Failed to get status for pod" podUID="594902df-fdc6-4c21-9c22-8b4fd690d408" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:10:50 crc kubenswrapper[4949]: I0216 11:10:50.305912 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Feb 16 11:10:50 crc kubenswrapper[4949]: I0216 11:10:50.307166 4949 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd" exitCode=0 Feb 16 11:10:50 crc kubenswrapper[4949]: I0216 11:10:50.307252 4949 scope.go:117] "RemoveContainer" containerID="cb4e6265ce5aefd48ee356fb002a61e38aed11e42968db8945ffeaba1d7ee3c9" Feb 16 11:10:50 crc kubenswrapper[4949]: I0216 11:10:50.307301 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 16 11:10:50 crc kubenswrapper[4949]: I0216 11:10:50.310897 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"594902df-fdc6-4c21-9c22-8b4fd690d408","Type":"ContainerDied","Data":"cdb8d46f98fe52c02793ce3d0f2a989b2c6754e617d7d4536a7f64f0dd247225"} Feb 16 11:10:50 crc kubenswrapper[4949]: I0216 11:10:50.310957 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cdb8d46f98fe52c02793ce3d0f2a989b2c6754e617d7d4536a7f64f0dd247225" Feb 16 11:10:50 crc kubenswrapper[4949]: I0216 11:10:50.310979 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Feb 16 11:10:50 crc kubenswrapper[4949]: I0216 11:10:50.326287 4949 status_manager.go:851] "Failed to get status for pod" podUID="594902df-fdc6-4c21-9c22-8b4fd690d408" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:10:50 crc kubenswrapper[4949]: I0216 11:10:50.326823 4949 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:10:50 crc kubenswrapper[4949]: I0216 11:10:50.327153 4949 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:10:50 crc kubenswrapper[4949]: I0216 11:10:50.331094 4949 scope.go:117] "RemoveContainer" containerID="dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49" Feb 16 11:10:50 crc kubenswrapper[4949]: I0216 11:10:50.343589 4949 scope.go:117] "RemoveContainer" containerID="d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec" Feb 16 11:10:50 crc kubenswrapper[4949]: I0216 11:10:50.358405 4949 scope.go:117] "RemoveContainer" containerID="b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809" Feb 16 11:10:50 crc kubenswrapper[4949]: I0216 11:10:50.370997 4949 scope.go:117] "RemoveContainer" containerID="0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd" Feb 16 11:10:50 crc kubenswrapper[4949]: I0216 11:10:50.387305 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Feb 16 11:10:50 crc kubenswrapper[4949]: I0216 11:10:50.387422 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Feb 16 11:10:50 crc kubenswrapper[4949]: I0216 11:10:50.387451 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Feb 16 11:10:50 crc kubenswrapper[4949]: I0216 11:10:50.387439 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 16 11:10:50 crc kubenswrapper[4949]: I0216 11:10:50.387512 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 16 11:10:50 crc kubenswrapper[4949]: I0216 11:10:50.387609 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 16 11:10:50 crc kubenswrapper[4949]: I0216 11:10:50.387876 4949 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:50 crc kubenswrapper[4949]: I0216 11:10:50.387893 4949 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:50 crc kubenswrapper[4949]: I0216 11:10:50.387903 4949 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:50 crc kubenswrapper[4949]: I0216 11:10:50.389939 4949 scope.go:117] "RemoveContainer" containerID="e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503" Feb 16 11:10:50 crc kubenswrapper[4949]: I0216 11:10:50.410103 4949 scope.go:117] "RemoveContainer" containerID="cb4e6265ce5aefd48ee356fb002a61e38aed11e42968db8945ffeaba1d7ee3c9" Feb 16 11:10:50 crc kubenswrapper[4949]: E0216 11:10:50.410760 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cb4e6265ce5aefd48ee356fb002a61e38aed11e42968db8945ffeaba1d7ee3c9\": container with ID starting with cb4e6265ce5aefd48ee356fb002a61e38aed11e42968db8945ffeaba1d7ee3c9 not found: ID does not exist" containerID="cb4e6265ce5aefd48ee356fb002a61e38aed11e42968db8945ffeaba1d7ee3c9" Feb 16 11:10:50 crc kubenswrapper[4949]: I0216 11:10:50.410844 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cb4e6265ce5aefd48ee356fb002a61e38aed11e42968db8945ffeaba1d7ee3c9"} err="failed to get container status \"cb4e6265ce5aefd48ee356fb002a61e38aed11e42968db8945ffeaba1d7ee3c9\": rpc error: code = NotFound desc = could not find container \"cb4e6265ce5aefd48ee356fb002a61e38aed11e42968db8945ffeaba1d7ee3c9\": container with ID starting with cb4e6265ce5aefd48ee356fb002a61e38aed11e42968db8945ffeaba1d7ee3c9 not found: ID does not exist" Feb 16 11:10:50 crc kubenswrapper[4949]: I0216 11:10:50.410913 4949 scope.go:117] "RemoveContainer" containerID="dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49" Feb 16 11:10:50 crc kubenswrapper[4949]: E0216 11:10:50.411284 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49\": container with ID starting with 
dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49 not found: ID does not exist" containerID="dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49" Feb 16 11:10:50 crc kubenswrapper[4949]: I0216 11:10:50.411337 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49"} err="failed to get container status \"dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49\": rpc error: code = NotFound desc = could not find container \"dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49\": container with ID starting with dc2f87ab3d49ad6cffa0c7dffc7b71b91ef6ae26e9a4213752df13cb5d7a6c49 not found: ID does not exist" Feb 16 11:10:50 crc kubenswrapper[4949]: I0216 11:10:50.411369 4949 scope.go:117] "RemoveContainer" containerID="d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec" Feb 16 11:10:50 crc kubenswrapper[4949]: E0216 11:10:50.411638 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec\": container with ID starting with d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec not found: ID does not exist" containerID="d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec" Feb 16 11:10:50 crc kubenswrapper[4949]: I0216 11:10:50.411687 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec"} err="failed to get container status \"d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec\": rpc error: code = NotFound desc = could not find container \"d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec\": container with ID starting with d8a759a92db443101029584ee900f75d3125bd6c0bcb126b74a4c8a423bdafec not found: ID does not exist" Feb 16 11:10:50 crc kubenswrapper[4949]: I0216 11:10:50.411717 4949 scope.go:117] "RemoveContainer" containerID="b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809" Feb 16 11:10:50 crc kubenswrapper[4949]: E0216 11:10:50.412688 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809\": container with ID starting with b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809 not found: ID does not exist" containerID="b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809" Feb 16 11:10:50 crc kubenswrapper[4949]: I0216 11:10:50.412770 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809"} err="failed to get container status \"b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809\": rpc error: code = NotFound desc = could not find container \"b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809\": container with ID starting with b43cea27fe2c5f4b8991e1c6cb28ee1c6f94d95f7a6e740d5b0410dbcd2fc809 not found: ID does not exist" Feb 16 11:10:50 crc kubenswrapper[4949]: I0216 11:10:50.412804 4949 scope.go:117] "RemoveContainer" containerID="0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd" Feb 16 11:10:50 crc kubenswrapper[4949]: E0216 11:10:50.413233 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc 
error: code = NotFound desc = could not find container \"0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd\": container with ID starting with 0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd not found: ID does not exist" containerID="0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd" Feb 16 11:10:50 crc kubenswrapper[4949]: I0216 11:10:50.413321 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd"} err="failed to get container status \"0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd\": rpc error: code = NotFound desc = could not find container \"0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd\": container with ID starting with 0a18ff441b377c115f1dc716cd46fa45c40e4b70d6f1f38ca586efd201ac66bd not found: ID does not exist" Feb 16 11:10:50 crc kubenswrapper[4949]: I0216 11:10:50.413350 4949 scope.go:117] "RemoveContainer" containerID="e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503" Feb 16 11:10:50 crc kubenswrapper[4949]: E0216 11:10:50.413707 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\": container with ID starting with e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503 not found: ID does not exist" containerID="e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503" Feb 16 11:10:50 crc kubenswrapper[4949]: I0216 11:10:50.413783 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503"} err="failed to get container status \"e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\": rpc error: code = NotFound desc = could not find container \"e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503\": container with ID starting with e26e21eb7877c29155a91a77f6e6eadbcbae9c7cd48a4964c6a82ae7b3732503 not found: ID does not exist" Feb 16 11:10:50 crc kubenswrapper[4949]: I0216 11:10:50.627964 4949 status_manager.go:851] "Failed to get status for pod" podUID="594902df-fdc6-4c21-9c22-8b4fd690d408" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:10:50 crc kubenswrapper[4949]: I0216 11:10:50.628631 4949 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:10:50 crc kubenswrapper[4949]: I0216 11:10:50.629324 4949 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:10:51 crc kubenswrapper[4949]: I0216 11:10:51.242062 4949 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" 
pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:10:51 crc kubenswrapper[4949]: I0216 11:10:51.244711 4949 status_manager.go:851] "Failed to get status for pod" podUID="594902df-fdc6-4c21-9c22-8b4fd690d408" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:10:51 crc kubenswrapper[4949]: I0216 11:10:51.245074 4949 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:10:51 crc kubenswrapper[4949]: I0216 11:10:51.245475 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Feb 16 11:10:51 crc kubenswrapper[4949]: E0216 11:10:51.732135 4949 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:10:51 crc kubenswrapper[4949]: E0216 11:10:51.732753 4949 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:10:51 crc kubenswrapper[4949]: E0216 11:10:51.733304 4949 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:10:51 crc kubenswrapper[4949]: E0216 11:10:51.733930 4949 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:10:51 crc kubenswrapper[4949]: E0216 11:10:51.734276 4949 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:10:51 crc kubenswrapper[4949]: I0216 11:10:51.734326 4949 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Feb 16 11:10:51 crc kubenswrapper[4949]: E0216 11:10:51.734790 4949 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.129:6443: connect: connection refused" interval="200ms" Feb 16 11:10:51 crc kubenswrapper[4949]: E0216 11:10:51.760441 4949 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.129:6443: connect: connection 
refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.1894b59f08319817 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-16 11:10:48.105342999 +0000 UTC m=+237.734677164,LastTimestamp:2026-02-16 11:10:48.105342999 +0000 UTC m=+237.734677164,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 16 11:10:51 crc kubenswrapper[4949]: E0216 11:10:51.936801 4949 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.129:6443: connect: connection refused" interval="400ms" Feb 16 11:10:52 crc kubenswrapper[4949]: E0216 11:10:52.339061 4949 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.129:6443: connect: connection refused" interval="800ms" Feb 16 11:10:53 crc kubenswrapper[4949]: E0216 11:10:53.140290 4949 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.129:6443: connect: connection refused" interval="1.6s" Feb 16 11:10:54 crc kubenswrapper[4949]: E0216 11:10:54.741712 4949 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.129:6443: connect: connection refused" interval="3.2s" Feb 16 11:10:56 crc kubenswrapper[4949]: I0216 11:10:56.421232 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" podUID="5e66d330-dc75-4a98-9739-589d8df61a68" containerName="oauth-openshift" containerID="cri-o://0e35dcf0f645c95599314bb39f890a89d2c876f9abda5da2e0a8a519b6a9245a" gracePeriod=15 Feb 16 11:10:56 crc kubenswrapper[4949]: I0216 11:10:56.991886 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" Feb 16 11:10:56 crc kubenswrapper[4949]: I0216 11:10:56.992922 4949 status_manager.go:851] "Failed to get status for pod" podUID="5e66d330-dc75-4a98-9739-589d8df61a68" pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-g4khj\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:10:56 crc kubenswrapper[4949]: I0216 11:10:56.993154 4949 status_manager.go:851] "Failed to get status for pod" podUID="594902df-fdc6-4c21-9c22-8b4fd690d408" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:10:56 crc kubenswrapper[4949]: I0216 11:10:56.993482 4949 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.083804 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/5e66d330-dc75-4a98-9739-589d8df61a68-audit-policies\") pod \"5e66d330-dc75-4a98-9739-589d8df61a68\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.083869 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-user-template-error\") pod \"5e66d330-dc75-4a98-9739-589d8df61a68\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.083920 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-system-trusted-ca-bundle\") pod \"5e66d330-dc75-4a98-9739-589d8df61a68\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.083954 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-system-ocp-branding-template\") pod \"5e66d330-dc75-4a98-9739-589d8df61a68\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.083994 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-system-serving-cert\") pod \"5e66d330-dc75-4a98-9739-589d8df61a68\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.084029 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-user-template-login\") pod 
\"5e66d330-dc75-4a98-9739-589d8df61a68\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.084059 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-user-template-provider-selection\") pod \"5e66d330-dc75-4a98-9739-589d8df61a68\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.084101 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/5e66d330-dc75-4a98-9739-589d8df61a68-audit-dir\") pod \"5e66d330-dc75-4a98-9739-589d8df61a68\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.084134 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-system-cliconfig\") pod \"5e66d330-dc75-4a98-9739-589d8df61a68\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.084192 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ntsvz\" (UniqueName: \"kubernetes.io/projected/5e66d330-dc75-4a98-9739-589d8df61a68-kube-api-access-ntsvz\") pod \"5e66d330-dc75-4a98-9739-589d8df61a68\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.084457 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5e66d330-dc75-4a98-9739-589d8df61a68-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "5e66d330-dc75-4a98-9739-589d8df61a68" (UID: "5e66d330-dc75-4a98-9739-589d8df61a68"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.085341 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5e66d330-dc75-4a98-9739-589d8df61a68-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "5e66d330-dc75-4a98-9739-589d8df61a68" (UID: "5e66d330-dc75-4a98-9739-589d8df61a68"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.084219 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-user-idp-0-file-data\") pod \"5e66d330-dc75-4a98-9739-589d8df61a68\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.085353 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "5e66d330-dc75-4a98-9739-589d8df61a68" (UID: "5e66d330-dc75-4a98-9739-589d8df61a68"). InnerVolumeSpecName "v4-0-config-system-cliconfig". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.085482 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-system-router-certs\") pod \"5e66d330-dc75-4a98-9739-589d8df61a68\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.085509 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-system-service-ca\") pod \"5e66d330-dc75-4a98-9739-589d8df61a68\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.085537 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-system-session\") pod \"5e66d330-dc75-4a98-9739-589d8df61a68\" (UID: \"5e66d330-dc75-4a98-9739-589d8df61a68\") " Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.085805 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "5e66d330-dc75-4a98-9739-589d8df61a68" (UID: "5e66d330-dc75-4a98-9739-589d8df61a68"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.086202 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "5e66d330-dc75-4a98-9739-589d8df61a68" (UID: "5e66d330-dc75-4a98-9739-589d8df61a68"). InnerVolumeSpecName "v4-0-config-system-service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.086504 4949 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/5e66d330-dc75-4a98-9739-589d8df61a68-audit-policies\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.086517 4949 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.086529 4949 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/5e66d330-dc75-4a98-9739-589d8df61a68-audit-dir\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.086541 4949 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.086550 4949 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.093268 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "5e66d330-dc75-4a98-9739-589d8df61a68" (UID: "5e66d330-dc75-4a98-9739-589d8df61a68"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.093680 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "5e66d330-dc75-4a98-9739-589d8df61a68" (UID: "5e66d330-dc75-4a98-9739-589d8df61a68"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.094026 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "5e66d330-dc75-4a98-9739-589d8df61a68" (UID: "5e66d330-dc75-4a98-9739-589d8df61a68"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.094874 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "5e66d330-dc75-4a98-9739-589d8df61a68" (UID: "5e66d330-dc75-4a98-9739-589d8df61a68"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.094984 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "5e66d330-dc75-4a98-9739-589d8df61a68" (UID: "5e66d330-dc75-4a98-9739-589d8df61a68"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.095548 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "5e66d330-dc75-4a98-9739-589d8df61a68" (UID: "5e66d330-dc75-4a98-9739-589d8df61a68"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.095866 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "5e66d330-dc75-4a98-9739-589d8df61a68" (UID: "5e66d330-dc75-4a98-9739-589d8df61a68"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.098564 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5e66d330-dc75-4a98-9739-589d8df61a68-kube-api-access-ntsvz" (OuterVolumeSpecName: "kube-api-access-ntsvz") pod "5e66d330-dc75-4a98-9739-589d8df61a68" (UID: "5e66d330-dc75-4a98-9739-589d8df61a68"). InnerVolumeSpecName "kube-api-access-ntsvz". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.099434 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "5e66d330-dc75-4a98-9739-589d8df61a68" (UID: "5e66d330-dc75-4a98-9739-589d8df61a68"). InnerVolumeSpecName "v4-0-config-user-template-login". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.188407 4949 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.188870 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ntsvz\" (UniqueName: \"kubernetes.io/projected/5e66d330-dc75-4a98-9739-589d8df61a68-kube-api-access-ntsvz\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.188977 4949 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.189071 4949 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.189159 4949 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.189307 4949 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.189377 4949 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.189454 4949 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.189518 4949 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/5e66d330-dc75-4a98-9739-589d8df61a68-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.356744 4949 generic.go:334] "Generic (PLEG): container finished" podID="5e66d330-dc75-4a98-9739-589d8df61a68" containerID="0e35dcf0f645c95599314bb39f890a89d2c876f9abda5da2e0a8a519b6a9245a" exitCode=0 Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.356795 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" event={"ID":"5e66d330-dc75-4a98-9739-589d8df61a68","Type":"ContainerDied","Data":"0e35dcf0f645c95599314bb39f890a89d2c876f9abda5da2e0a8a519b6a9245a"} Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.356826 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" 
event={"ID":"5e66d330-dc75-4a98-9739-589d8df61a68","Type":"ContainerDied","Data":"ca21aadffd68ddf21e7c12f2d3627c16f2da9ce85a1bce9526d1bcdddde78288"} Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.356846 4949 scope.go:117] "RemoveContainer" containerID="0e35dcf0f645c95599314bb39f890a89d2c876f9abda5da2e0a8a519b6a9245a" Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.357189 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.358222 4949 status_manager.go:851] "Failed to get status for pod" podUID="5e66d330-dc75-4a98-9739-589d8df61a68" pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-g4khj\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.358965 4949 status_manager.go:851] "Failed to get status for pod" podUID="594902df-fdc6-4c21-9c22-8b4fd690d408" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.359343 4949 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.363849 4949 status_manager.go:851] "Failed to get status for pod" podUID="5e66d330-dc75-4a98-9739-589d8df61a68" pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-g4khj\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.364370 4949 status_manager.go:851] "Failed to get status for pod" podUID="594902df-fdc6-4c21-9c22-8b4fd690d408" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.364809 4949 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.375450 4949 scope.go:117] "RemoveContainer" containerID="0e35dcf0f645c95599314bb39f890a89d2c876f9abda5da2e0a8a519b6a9245a" Feb 16 11:10:57 crc kubenswrapper[4949]: E0216 11:10:57.375939 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0e35dcf0f645c95599314bb39f890a89d2c876f9abda5da2e0a8a519b6a9245a\": container with ID starting with 0e35dcf0f645c95599314bb39f890a89d2c876f9abda5da2e0a8a519b6a9245a not found: ID does not exist" 
containerID="0e35dcf0f645c95599314bb39f890a89d2c876f9abda5da2e0a8a519b6a9245a" Feb 16 11:10:57 crc kubenswrapper[4949]: I0216 11:10:57.375986 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0e35dcf0f645c95599314bb39f890a89d2c876f9abda5da2e0a8a519b6a9245a"} err="failed to get container status \"0e35dcf0f645c95599314bb39f890a89d2c876f9abda5da2e0a8a519b6a9245a\": rpc error: code = NotFound desc = could not find container \"0e35dcf0f645c95599314bb39f890a89d2c876f9abda5da2e0a8a519b6a9245a\": container with ID starting with 0e35dcf0f645c95599314bb39f890a89d2c876f9abda5da2e0a8a519b6a9245a not found: ID does not exist" Feb 16 11:10:57 crc kubenswrapper[4949]: E0216 11:10:57.943156 4949 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.129:6443: connect: connection refused" interval="6.4s" Feb 16 11:11:01 crc kubenswrapper[4949]: I0216 11:11:01.239320 4949 status_manager.go:851] "Failed to get status for pod" podUID="5e66d330-dc75-4a98-9739-589d8df61a68" pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-g4khj\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:11:01 crc kubenswrapper[4949]: I0216 11:11:01.242585 4949 status_manager.go:851] "Failed to get status for pod" podUID="594902df-fdc6-4c21-9c22-8b4fd690d408" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:11:01 crc kubenswrapper[4949]: I0216 11:11:01.243281 4949 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:11:01 crc kubenswrapper[4949]: I0216 11:11:01.250233 4949 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Liveness probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Feb 16 11:11:01 crc kubenswrapper[4949]: I0216 11:11:01.250329 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Feb 16 11:11:01 crc kubenswrapper[4949]: I0216 11:11:01.388807 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Feb 16 11:11:01 crc kubenswrapper[4949]: I0216 11:11:01.389324 4949 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="68519f3c9bfc45ad28f92e2cf0c28a9413821784aafd91ab65a311259ed6ecf6" exitCode=1 Feb 16 11:11:01 crc kubenswrapper[4949]: I0216 
11:11:01.389397 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"68519f3c9bfc45ad28f92e2cf0c28a9413821784aafd91ab65a311259ed6ecf6"} Feb 16 11:11:01 crc kubenswrapper[4949]: I0216 11:11:01.390468 4949 scope.go:117] "RemoveContainer" containerID="68519f3c9bfc45ad28f92e2cf0c28a9413821784aafd91ab65a311259ed6ecf6" Feb 16 11:11:01 crc kubenswrapper[4949]: I0216 11:11:01.390720 4949 status_manager.go:851] "Failed to get status for pod" podUID="5e66d330-dc75-4a98-9739-589d8df61a68" pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-g4khj\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:11:01 crc kubenswrapper[4949]: I0216 11:11:01.391551 4949 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:11:01 crc kubenswrapper[4949]: I0216 11:11:01.392681 4949 status_manager.go:851] "Failed to get status for pod" podUID="594902df-fdc6-4c21-9c22-8b4fd690d408" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:11:01 crc kubenswrapper[4949]: I0216 11:11:01.393194 4949 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:11:01 crc kubenswrapper[4949]: E0216 11:11:01.576395 4949 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:11:01Z\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:11:01Z\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:11:01Z\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-16T11:11:01Z\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"}]}}\" for node \"crc\": Patch \"https://api-int.crc.testing:6443/api/v1/nodes/crc/status?timeout=10s\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:11:01 crc kubenswrapper[4949]: E0216 11:11:01.576624 4949 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get 
\"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:11:01 crc kubenswrapper[4949]: E0216 11:11:01.577037 4949 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:11:01 crc kubenswrapper[4949]: E0216 11:11:01.577624 4949 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:11:01 crc kubenswrapper[4949]: E0216 11:11:01.577923 4949 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:11:01 crc kubenswrapper[4949]: E0216 11:11:01.577955 4949 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Feb 16 11:11:01 crc kubenswrapper[4949]: E0216 11:11:01.761961 4949 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.129:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.1894b59f08319817 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-16 11:10:48.105342999 +0000 UTC m=+237.734677164,LastTimestamp:2026-02-16 11:10:48.105342999 +0000 UTC m=+237.734677164,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 16 11:11:02 crc kubenswrapper[4949]: I0216 11:11:02.234447 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 16 11:11:02 crc kubenswrapper[4949]: I0216 11:11:02.235823 4949 status_manager.go:851] "Failed to get status for pod" podUID="5e66d330-dc75-4a98-9739-589d8df61a68" pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-g4khj\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:11:02 crc kubenswrapper[4949]: I0216 11:11:02.236669 4949 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:11:02 crc kubenswrapper[4949]: I0216 11:11:02.237333 4949 status_manager.go:851] "Failed to get status for pod" podUID="594902df-fdc6-4c21-9c22-8b4fd690d408" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:11:02 crc kubenswrapper[4949]: I0216 11:11:02.237721 4949 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:11:02 crc kubenswrapper[4949]: I0216 11:11:02.248064 4949 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="aada2690-8d5f-4854-bc83-59906010e8ec" Feb 16 11:11:02 crc kubenswrapper[4949]: I0216 11:11:02.248130 4949 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="aada2690-8d5f-4854-bc83-59906010e8ec" Feb 16 11:11:02 crc kubenswrapper[4949]: E0216 11:11:02.249072 4949 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.129:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 16 11:11:02 crc kubenswrapper[4949]: I0216 11:11:02.250029 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 16 11:11:02 crc kubenswrapper[4949]: W0216 11:11:02.284482 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71bb4a3aecc4ba5b26c4b7318770ce13.slice/crio-220611a0401c52a457ded22cc8575caa8f2b01d12d982d704006da8c2c0afe9e WatchSource:0}: Error finding container 220611a0401c52a457ded22cc8575caa8f2b01d12d982d704006da8c2c0afe9e: Status 404 returned error can't find the container with id 220611a0401c52a457ded22cc8575caa8f2b01d12d982d704006da8c2c0afe9e Feb 16 11:11:02 crc kubenswrapper[4949]: I0216 11:11:02.399876 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"220611a0401c52a457ded22cc8575caa8f2b01d12d982d704006da8c2c0afe9e"} Feb 16 11:11:02 crc kubenswrapper[4949]: I0216 11:11:02.404354 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Feb 16 11:11:02 crc kubenswrapper[4949]: I0216 11:11:02.404479 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"f2db7eaefab4e9796abba64513a3e9e4ae09fc9aab222cad4b27257a62f8ee4f"} Feb 16 11:11:02 crc kubenswrapper[4949]: I0216 11:11:02.405616 4949 status_manager.go:851] "Failed to get status for pod" podUID="5e66d330-dc75-4a98-9739-589d8df61a68" pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-g4khj\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:11:02 crc kubenswrapper[4949]: I0216 11:11:02.406414 4949 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:11:02 crc kubenswrapper[4949]: I0216 11:11:02.406893 4949 status_manager.go:851] "Failed to get status for pod" podUID="594902df-fdc6-4c21-9c22-8b4fd690d408" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:11:02 crc kubenswrapper[4949]: I0216 11:11:02.407821 4949 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:11:03 crc kubenswrapper[4949]: I0216 11:11:03.414511 4949 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="41ecc1187382cce59a78cc453e9b3688f10281744ed9b3fb7193ce4fa2b1eaf9" exitCode=0 Feb 16 11:11:03 crc kubenswrapper[4949]: I0216 11:11:03.414576 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"41ecc1187382cce59a78cc453e9b3688f10281744ed9b3fb7193ce4fa2b1eaf9"} Feb 16 11:11:03 crc kubenswrapper[4949]: I0216 11:11:03.415088 4949 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="aada2690-8d5f-4854-bc83-59906010e8ec" Feb 16 11:11:03 crc kubenswrapper[4949]: I0216 11:11:03.415112 4949 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="aada2690-8d5f-4854-bc83-59906010e8ec" Feb 16 11:11:03 crc kubenswrapper[4949]: E0216 11:11:03.415688 4949 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.129:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 16 11:11:03 crc kubenswrapper[4949]: I0216 11:11:03.415842 4949 status_manager.go:851] "Failed to get status for pod" podUID="5e66d330-dc75-4a98-9739-589d8df61a68" pod="openshift-authentication/oauth-openshift-558db77b4-g4khj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-g4khj\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:11:03 crc kubenswrapper[4949]: I0216 11:11:03.416190 4949 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:11:03 crc kubenswrapper[4949]: I0216 11:11:03.416510 4949 status_manager.go:851] "Failed to get status for pod" podUID="594902df-fdc6-4c21-9c22-8b4fd690d408" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:11:03 crc kubenswrapper[4949]: I0216 11:11:03.416808 4949 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.129:6443: connect: connection refused" Feb 16 11:11:04 crc kubenswrapper[4949]: I0216 11:11:04.427814 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"f66eb0c76d345803927bba83dbc40adf86df4174471e97e41ac006b6e16081d6"} Feb 16 11:11:04 crc kubenswrapper[4949]: I0216 11:11:04.428191 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"35c13fca6cb4b131d3ad5ae3b65ecb674d266be60922156f3564e75682d6aa5a"} Feb 16 11:11:04 crc kubenswrapper[4949]: I0216 11:11:04.428201 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"0f0238482d8b0838ba1673e754c0fabf6327499709e19bc2dc7d1d9ba5c0cf0d"} Feb 16 
11:11:04 crc kubenswrapper[4949]: I0216 11:11:04.776905 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 16 11:11:04 crc kubenswrapper[4949]: I0216 11:11:04.777489 4949 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Feb 16 11:11:04 crc kubenswrapper[4949]: I0216 11:11:04.777631 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Feb 16 11:11:05 crc kubenswrapper[4949]: I0216 11:11:05.438635 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"fa6590907adfcc5f227b01dc507e3b2c18e13d94b1ace50d25b90146e68341df"} Feb 16 11:11:05 crc kubenswrapper[4949]: I0216 11:11:05.438714 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"04abd3873232957a96235b5b6333f8a9717a2ce83c2d3f07d4be9a3a3a8abd8f"} Feb 16 11:11:05 crc kubenswrapper[4949]: I0216 11:11:05.438837 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 16 11:11:05 crc kubenswrapper[4949]: I0216 11:11:05.438952 4949 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="aada2690-8d5f-4854-bc83-59906010e8ec" Feb 16 11:11:05 crc kubenswrapper[4949]: I0216 11:11:05.438975 4949 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="aada2690-8d5f-4854-bc83-59906010e8ec" Feb 16 11:11:06 crc kubenswrapper[4949]: I0216 11:11:06.986112 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 16 11:11:07 crc kubenswrapper[4949]: I0216 11:11:07.250870 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 16 11:11:07 crc kubenswrapper[4949]: I0216 11:11:07.250925 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 16 11:11:07 crc kubenswrapper[4949]: I0216 11:11:07.258053 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 16 11:11:10 crc kubenswrapper[4949]: I0216 11:11:10.452161 4949 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 16 11:11:10 crc kubenswrapper[4949]: I0216 11:11:10.477532 4949 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="aada2690-8d5f-4854-bc83-59906010e8ec" Feb 16 11:11:10 crc kubenswrapper[4949]: I0216 11:11:10.477576 4949 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="aada2690-8d5f-4854-bc83-59906010e8ec" 
Feb 16 11:11:10 crc kubenswrapper[4949]: I0216 11:11:10.484318 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 16 11:11:11 crc kubenswrapper[4949]: I0216 11:11:11.246695 4949 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="cb351c8e-0c47-4438-924a-ab9636036652" Feb 16 11:11:11 crc kubenswrapper[4949]: I0216 11:11:11.482297 4949 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="aada2690-8d5f-4854-bc83-59906010e8ec" Feb 16 11:11:11 crc kubenswrapper[4949]: I0216 11:11:11.482335 4949 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="aada2690-8d5f-4854-bc83-59906010e8ec" Feb 16 11:11:11 crc kubenswrapper[4949]: I0216 11:11:11.485842 4949 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="cb351c8e-0c47-4438-924a-ab9636036652" Feb 16 11:11:14 crc kubenswrapper[4949]: I0216 11:11:14.777118 4949 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Feb 16 11:11:14 crc kubenswrapper[4949]: I0216 11:11:14.777503 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Feb 16 11:11:20 crc kubenswrapper[4949]: I0216 11:11:20.550997 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Feb 16 11:11:21 crc kubenswrapper[4949]: I0216 11:11:21.318612 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Feb 16 11:11:21 crc kubenswrapper[4949]: I0216 11:11:21.441140 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Feb 16 11:11:21 crc kubenswrapper[4949]: I0216 11:11:21.593531 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Feb 16 11:11:21 crc kubenswrapper[4949]: I0216 11:11:21.705270 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Feb 16 11:11:21 crc kubenswrapper[4949]: I0216 11:11:21.815851 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Feb 16 11:11:21 crc kubenswrapper[4949]: I0216 11:11:21.920618 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Feb 16 11:11:22 crc kubenswrapper[4949]: I0216 11:11:22.462873 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Feb 16 11:11:22 crc kubenswrapper[4949]: I0216 11:11:22.533104 4949 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Feb 16 11:11:22 crc kubenswrapper[4949]: I0216 11:11:22.593327 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Feb 16 11:11:22 crc kubenswrapper[4949]: I0216 11:11:22.645474 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Feb 16 11:11:22 crc kubenswrapper[4949]: I0216 11:11:22.717388 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Feb 16 11:11:22 crc kubenswrapper[4949]: I0216 11:11:22.735639 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Feb 16 11:11:23 crc kubenswrapper[4949]: I0216 11:11:23.024135 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Feb 16 11:11:23 crc kubenswrapper[4949]: I0216 11:11:23.072474 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Feb 16 11:11:23 crc kubenswrapper[4949]: I0216 11:11:23.105598 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Feb 16 11:11:23 crc kubenswrapper[4949]: I0216 11:11:23.264437 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Feb 16 11:11:23 crc kubenswrapper[4949]: I0216 11:11:23.282166 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Feb 16 11:11:23 crc kubenswrapper[4949]: I0216 11:11:23.296368 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Feb 16 11:11:23 crc kubenswrapper[4949]: I0216 11:11:23.297572 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Feb 16 11:11:23 crc kubenswrapper[4949]: I0216 11:11:23.319323 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Feb 16 11:11:23 crc kubenswrapper[4949]: I0216 11:11:23.421496 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Feb 16 11:11:23 crc kubenswrapper[4949]: I0216 11:11:23.449406 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Feb 16 11:11:23 crc kubenswrapper[4949]: I0216 11:11:23.469940 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Feb 16 11:11:23 crc kubenswrapper[4949]: I0216 11:11:23.518943 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Feb 16 11:11:23 crc kubenswrapper[4949]: I0216 11:11:23.586159 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Feb 16 11:11:23 crc kubenswrapper[4949]: I0216 11:11:23.812617 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Feb 16 11:11:23 crc 
kubenswrapper[4949]: I0216 11:11:23.866063 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Feb 16 11:11:24 crc kubenswrapper[4949]: I0216 11:11:24.025623 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Feb 16 11:11:24 crc kubenswrapper[4949]: I0216 11:11:24.070496 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Feb 16 11:11:24 crc kubenswrapper[4949]: I0216 11:11:24.073000 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Feb 16 11:11:24 crc kubenswrapper[4949]: I0216 11:11:24.147294 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Feb 16 11:11:24 crc kubenswrapper[4949]: I0216 11:11:24.163647 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Feb 16 11:11:24 crc kubenswrapper[4949]: I0216 11:11:24.176636 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Feb 16 11:11:24 crc kubenswrapper[4949]: I0216 11:11:24.191492 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Feb 16 11:11:24 crc kubenswrapper[4949]: I0216 11:11:24.237840 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Feb 16 11:11:24 crc kubenswrapper[4949]: I0216 11:11:24.280126 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Feb 16 11:11:24 crc kubenswrapper[4949]: I0216 11:11:24.294305 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Feb 16 11:11:24 crc kubenswrapper[4949]: I0216 11:11:24.457027 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Feb 16 11:11:24 crc kubenswrapper[4949]: I0216 11:11:24.573497 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Feb 16 11:11:24 crc kubenswrapper[4949]: I0216 11:11:24.777741 4949 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Feb 16 11:11:24 crc kubenswrapper[4949]: I0216 11:11:24.777831 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Feb 16 11:11:24 crc kubenswrapper[4949]: I0216 11:11:24.777927 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 16 11:11:24 crc kubenswrapper[4949]: I0216 11:11:24.778905 4949 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="kube-controller-manager" 
containerStatusID={"Type":"cri-o","ID":"f2db7eaefab4e9796abba64513a3e9e4ae09fc9aab222cad4b27257a62f8ee4f"} pod="openshift-kube-controller-manager/kube-controller-manager-crc" containerMessage="Container kube-controller-manager failed startup probe, will be restarted" Feb 16 11:11:24 crc kubenswrapper[4949]: I0216 11:11:24.779081 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" containerID="cri-o://f2db7eaefab4e9796abba64513a3e9e4ae09fc9aab222cad4b27257a62f8ee4f" gracePeriod=30 Feb 16 11:11:24 crc kubenswrapper[4949]: I0216 11:11:24.779857 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Feb 16 11:11:24 crc kubenswrapper[4949]: I0216 11:11:24.864018 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Feb 16 11:11:24 crc kubenswrapper[4949]: I0216 11:11:24.962100 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Feb 16 11:11:25 crc kubenswrapper[4949]: I0216 11:11:25.001683 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Feb 16 11:11:25 crc kubenswrapper[4949]: I0216 11:11:25.117942 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Feb 16 11:11:25 crc kubenswrapper[4949]: I0216 11:11:25.120220 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Feb 16 11:11:25 crc kubenswrapper[4949]: I0216 11:11:25.257151 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Feb 16 11:11:25 crc kubenswrapper[4949]: I0216 11:11:25.264527 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Feb 16 11:11:25 crc kubenswrapper[4949]: I0216 11:11:25.303956 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Feb 16 11:11:25 crc kubenswrapper[4949]: I0216 11:11:25.328349 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Feb 16 11:11:25 crc kubenswrapper[4949]: I0216 11:11:25.384997 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Feb 16 11:11:25 crc kubenswrapper[4949]: I0216 11:11:25.393770 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Feb 16 11:11:25 crc kubenswrapper[4949]: I0216 11:11:25.467119 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Feb 16 11:11:25 crc kubenswrapper[4949]: I0216 11:11:25.497598 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Feb 16 11:11:25 crc kubenswrapper[4949]: I0216 11:11:25.502388 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Feb 16 11:11:25 crc kubenswrapper[4949]: I0216 11:11:25.509913 4949 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Feb 16 11:11:25 crc kubenswrapper[4949]: I0216 11:11:25.698413 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Feb 16 11:11:25 crc kubenswrapper[4949]: I0216 11:11:25.805025 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Feb 16 11:11:25 crc kubenswrapper[4949]: I0216 11:11:25.815479 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Feb 16 11:11:25 crc kubenswrapper[4949]: I0216 11:11:25.851897 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Feb 16 11:11:25 crc kubenswrapper[4949]: I0216 11:11:25.857573 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Feb 16 11:11:25 crc kubenswrapper[4949]: I0216 11:11:25.951581 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Feb 16 11:11:26 crc kubenswrapper[4949]: I0216 11:11:26.010969 4949 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Feb 16 11:11:26 crc kubenswrapper[4949]: I0216 11:11:26.136676 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Feb 16 11:11:26 crc kubenswrapper[4949]: I0216 11:11:26.355560 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Feb 16 11:11:26 crc kubenswrapper[4949]: I0216 11:11:26.373089 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Feb 16 11:11:26 crc kubenswrapper[4949]: I0216 11:11:26.542504 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Feb 16 11:11:26 crc kubenswrapper[4949]: I0216 11:11:26.587145 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Feb 16 11:11:26 crc kubenswrapper[4949]: I0216 11:11:26.630232 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Feb 16 11:11:26 crc kubenswrapper[4949]: I0216 11:11:26.670475 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Feb 16 11:11:26 crc kubenswrapper[4949]: I0216 11:11:26.712095 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Feb 16 11:11:26 crc kubenswrapper[4949]: I0216 11:11:26.757523 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Feb 16 11:11:26 crc kubenswrapper[4949]: I0216 11:11:26.765527 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Feb 16 11:11:26 crc kubenswrapper[4949]: I0216 11:11:26.766393 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Feb 16 11:11:26 crc kubenswrapper[4949]: I0216 11:11:26.806330 4949 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Feb 16 11:11:26 crc kubenswrapper[4949]: I0216 11:11:26.862575 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Feb 16 11:11:26 crc kubenswrapper[4949]: I0216 11:11:26.864162 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Feb 16 11:11:26 crc kubenswrapper[4949]: I0216 11:11:26.934746 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Feb 16 11:11:26 crc kubenswrapper[4949]: I0216 11:11:26.941026 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Feb 16 11:11:26 crc kubenswrapper[4949]: I0216 11:11:26.961742 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Feb 16 11:11:26 crc kubenswrapper[4949]: I0216 11:11:26.976405 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Feb 16 11:11:27 crc kubenswrapper[4949]: I0216 11:11:27.200360 4949 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Feb 16 11:11:27 crc kubenswrapper[4949]: I0216 11:11:27.223201 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Feb 16 11:11:27 crc kubenswrapper[4949]: I0216 11:11:27.256528 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Feb 16 11:11:27 crc kubenswrapper[4949]: I0216 11:11:27.309760 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Feb 16 11:11:27 crc kubenswrapper[4949]: I0216 11:11:27.329254 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Feb 16 11:11:27 crc kubenswrapper[4949]: I0216 11:11:27.357646 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Feb 16 11:11:27 crc kubenswrapper[4949]: I0216 11:11:27.399716 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Feb 16 11:11:27 crc kubenswrapper[4949]: I0216 11:11:27.457361 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Feb 16 11:11:27 crc kubenswrapper[4949]: I0216 11:11:27.459652 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Feb 16 11:11:27 crc kubenswrapper[4949]: I0216 11:11:27.477750 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Feb 16 11:11:27 crc kubenswrapper[4949]: I0216 11:11:27.546390 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Feb 16 11:11:27 crc kubenswrapper[4949]: I0216 11:11:27.549942 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Feb 16 11:11:27 crc kubenswrapper[4949]: I0216 11:11:27.683119 4949 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-machine-config-operator"/"machine-config-operator-images" Feb 16 11:11:27 crc kubenswrapper[4949]: I0216 11:11:27.742350 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Feb 16 11:11:27 crc kubenswrapper[4949]: I0216 11:11:27.752392 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Feb 16 11:11:27 crc kubenswrapper[4949]: I0216 11:11:27.800133 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Feb 16 11:11:27 crc kubenswrapper[4949]: I0216 11:11:27.897634 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Feb 16 11:11:27 crc kubenswrapper[4949]: I0216 11:11:27.924273 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Feb 16 11:11:27 crc kubenswrapper[4949]: I0216 11:11:27.940142 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Feb 16 11:11:28 crc kubenswrapper[4949]: I0216 11:11:28.008939 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Feb 16 11:11:28 crc kubenswrapper[4949]: I0216 11:11:28.036509 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Feb 16 11:11:28 crc kubenswrapper[4949]: I0216 11:11:28.066140 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Feb 16 11:11:28 crc kubenswrapper[4949]: I0216 11:11:28.115585 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Feb 16 11:11:28 crc kubenswrapper[4949]: I0216 11:11:28.198065 4949 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Feb 16 11:11:28 crc kubenswrapper[4949]: I0216 11:11:28.314420 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Feb 16 11:11:28 crc kubenswrapper[4949]: I0216 11:11:28.379535 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Feb 16 11:11:28 crc kubenswrapper[4949]: I0216 11:11:28.432058 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Feb 16 11:11:28 crc kubenswrapper[4949]: I0216 11:11:28.433905 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Feb 16 11:11:28 crc kubenswrapper[4949]: I0216 11:11:28.462994 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Feb 16 11:11:28 crc kubenswrapper[4949]: I0216 11:11:28.683275 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Feb 16 11:11:28 crc kubenswrapper[4949]: I0216 11:11:28.701983 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Feb 16 11:11:28 crc kubenswrapper[4949]: I0216 11:11:28.786898 4949 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Feb 16 11:11:28 crc kubenswrapper[4949]: I0216 11:11:28.811598 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Feb 16 11:11:28 crc kubenswrapper[4949]: I0216 11:11:28.828587 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Feb 16 11:11:28 crc kubenswrapper[4949]: I0216 11:11:28.878297 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Feb 16 11:11:28 crc kubenswrapper[4949]: I0216 11:11:28.931493 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Feb 16 11:11:28 crc kubenswrapper[4949]: I0216 11:11:28.940740 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Feb 16 11:11:28 crc kubenswrapper[4949]: I0216 11:11:28.964563 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Feb 16 11:11:28 crc kubenswrapper[4949]: I0216 11:11:28.977801 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Feb 16 11:11:29 crc kubenswrapper[4949]: I0216 11:11:29.035242 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Feb 16 11:11:29 crc kubenswrapper[4949]: I0216 11:11:29.042862 4949 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Feb 16 11:11:29 crc kubenswrapper[4949]: I0216 11:11:29.053909 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Feb 16 11:11:29 crc kubenswrapper[4949]: I0216 11:11:29.082674 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Feb 16 11:11:29 crc kubenswrapper[4949]: I0216 11:11:29.130934 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Feb 16 11:11:29 crc kubenswrapper[4949]: I0216 11:11:29.134294 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Feb 16 11:11:29 crc kubenswrapper[4949]: I0216 11:11:29.253565 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Feb 16 11:11:29 crc kubenswrapper[4949]: I0216 11:11:29.270815 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Feb 16 11:11:29 crc kubenswrapper[4949]: I0216 11:11:29.274457 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Feb 16 11:11:29 crc kubenswrapper[4949]: I0216 11:11:29.342986 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Feb 16 11:11:29 crc kubenswrapper[4949]: I0216 11:11:29.347255 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Feb 16 
11:11:29 crc kubenswrapper[4949]: I0216 11:11:29.390934 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Feb 16 11:11:29 crc kubenswrapper[4949]: I0216 11:11:29.398077 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Feb 16 11:11:29 crc kubenswrapper[4949]: I0216 11:11:29.490390 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Feb 16 11:11:29 crc kubenswrapper[4949]: I0216 11:11:29.612299 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Feb 16 11:11:29 crc kubenswrapper[4949]: I0216 11:11:29.637295 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Feb 16 11:11:29 crc kubenswrapper[4949]: I0216 11:11:29.660965 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Feb 16 11:11:29 crc kubenswrapper[4949]: I0216 11:11:29.715591 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Feb 16 11:11:29 crc kubenswrapper[4949]: I0216 11:11:29.721223 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Feb 16 11:11:29 crc kubenswrapper[4949]: I0216 11:11:29.746993 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Feb 16 11:11:29 crc kubenswrapper[4949]: I0216 11:11:29.760084 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Feb 16 11:11:29 crc kubenswrapper[4949]: I0216 11:11:29.844162 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Feb 16 11:11:29 crc kubenswrapper[4949]: I0216 11:11:29.862743 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Feb 16 11:11:29 crc kubenswrapper[4949]: I0216 11:11:29.880725 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Feb 16 11:11:29 crc kubenswrapper[4949]: I0216 11:11:29.956763 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Feb 16 11:11:29 crc kubenswrapper[4949]: I0216 11:11:29.962135 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Feb 16 11:11:29 crc kubenswrapper[4949]: I0216 11:11:29.975823 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.090925 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.107363 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.117395 4949 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.120426 4949 pod_startup_latency_tracker.go:104] 
"Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podStartSLOduration=43.120397491 podStartE2EDuration="43.120397491s" podCreationTimestamp="2026-02-16 11:10:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:11:10.4674807 +0000 UTC m=+260.096814905" watchObservedRunningTime="2026-02-16 11:11:30.120397491 +0000 UTC m=+279.749731656" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.123038 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc","openshift-authentication/oauth-openshift-558db77b4-g4khj"] Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.123114 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc","openshift-authentication/oauth-openshift-7b49777cd7-fmkg6"] Feb 16 11:11:30 crc kubenswrapper[4949]: E0216 11:11:30.123469 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e66d330-dc75-4a98-9739-589d8df61a68" containerName="oauth-openshift" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.123494 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e66d330-dc75-4a98-9739-589d8df61a68" containerName="oauth-openshift" Feb 16 11:11:30 crc kubenswrapper[4949]: E0216 11:11:30.123527 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="594902df-fdc6-4c21-9c22-8b4fd690d408" containerName="installer" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.123536 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="594902df-fdc6-4c21-9c22-8b4fd690d408" containerName="installer" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.123637 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="5e66d330-dc75-4a98-9739-589d8df61a68" containerName="oauth-openshift" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.123654 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="594902df-fdc6-4c21-9c22-8b4fd690d408" containerName="installer" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.123680 4949 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="aada2690-8d5f-4854-bc83-59906010e8ec" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.123713 4949 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="aada2690-8d5f-4854-bc83-59906010e8ec" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.124198 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-7b49777cd7-fmkg6" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.135659 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.135823 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.135958 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.136023 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.139657 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.139927 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.140616 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.141409 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.141557 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.141715 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.141958 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.142116 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.142276 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.147773 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.152851 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.155737 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.161348 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.162959 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=20.162940695 
podStartE2EDuration="20.162940695s" podCreationTimestamp="2026-02-16 11:11:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:11:30.156711604 +0000 UTC m=+279.786045769" watchObservedRunningTime="2026-02-16 11:11:30.162940695 +0000 UTC m=+279.792274860" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.310581 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/5498ab7d-3a96-4342-8670-9882b2dc5151-v4-0-config-system-router-certs\") pod \"oauth-openshift-7b49777cd7-fmkg6\" (UID: \"5498ab7d-3a96-4342-8670-9882b2dc5151\") " pod="openshift-authentication/oauth-openshift-7b49777cd7-fmkg6" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.311243 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/5498ab7d-3a96-4342-8670-9882b2dc5151-v4-0-config-system-session\") pod \"oauth-openshift-7b49777cd7-fmkg6\" (UID: \"5498ab7d-3a96-4342-8670-9882b2dc5151\") " pod="openshift-authentication/oauth-openshift-7b49777cd7-fmkg6" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.311332 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/5498ab7d-3a96-4342-8670-9882b2dc5151-v4-0-config-system-cliconfig\") pod \"oauth-openshift-7b49777cd7-fmkg6\" (UID: \"5498ab7d-3a96-4342-8670-9882b2dc5151\") " pod="openshift-authentication/oauth-openshift-7b49777cd7-fmkg6" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.311350 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/5498ab7d-3a96-4342-8670-9882b2dc5151-v4-0-config-user-template-error\") pod \"oauth-openshift-7b49777cd7-fmkg6\" (UID: \"5498ab7d-3a96-4342-8670-9882b2dc5151\") " pod="openshift-authentication/oauth-openshift-7b49777cd7-fmkg6" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.311371 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/5498ab7d-3a96-4342-8670-9882b2dc5151-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-7b49777cd7-fmkg6\" (UID: \"5498ab7d-3a96-4342-8670-9882b2dc5151\") " pod="openshift-authentication/oauth-openshift-7b49777cd7-fmkg6" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.311744 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/5498ab7d-3a96-4342-8670-9882b2dc5151-audit-dir\") pod \"oauth-openshift-7b49777cd7-fmkg6\" (UID: \"5498ab7d-3a96-4342-8670-9882b2dc5151\") " pod="openshift-authentication/oauth-openshift-7b49777cd7-fmkg6" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.311836 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-plsvm\" (UniqueName: \"kubernetes.io/projected/5498ab7d-3a96-4342-8670-9882b2dc5151-kube-api-access-plsvm\") pod \"oauth-openshift-7b49777cd7-fmkg6\" (UID: \"5498ab7d-3a96-4342-8670-9882b2dc5151\") " pod="openshift-authentication/oauth-openshift-7b49777cd7-fmkg6" Feb 16 11:11:30 crc 
kubenswrapper[4949]: I0216 11:11:30.311866 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/5498ab7d-3a96-4342-8670-9882b2dc5151-audit-policies\") pod \"oauth-openshift-7b49777cd7-fmkg6\" (UID: \"5498ab7d-3a96-4342-8670-9882b2dc5151\") " pod="openshift-authentication/oauth-openshift-7b49777cd7-fmkg6" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.311884 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/5498ab7d-3a96-4342-8670-9882b2dc5151-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-7b49777cd7-fmkg6\" (UID: \"5498ab7d-3a96-4342-8670-9882b2dc5151\") " pod="openshift-authentication/oauth-openshift-7b49777cd7-fmkg6" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.311915 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/5498ab7d-3a96-4342-8670-9882b2dc5151-v4-0-config-system-serving-cert\") pod \"oauth-openshift-7b49777cd7-fmkg6\" (UID: \"5498ab7d-3a96-4342-8670-9882b2dc5151\") " pod="openshift-authentication/oauth-openshift-7b49777cd7-fmkg6" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.311941 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5498ab7d-3a96-4342-8670-9882b2dc5151-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-7b49777cd7-fmkg6\" (UID: \"5498ab7d-3a96-4342-8670-9882b2dc5151\") " pod="openshift-authentication/oauth-openshift-7b49777cd7-fmkg6" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.311993 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/5498ab7d-3a96-4342-8670-9882b2dc5151-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-7b49777cd7-fmkg6\" (UID: \"5498ab7d-3a96-4342-8670-9882b2dc5151\") " pod="openshift-authentication/oauth-openshift-7b49777cd7-fmkg6" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.312017 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/5498ab7d-3a96-4342-8670-9882b2dc5151-v4-0-config-user-template-login\") pod \"oauth-openshift-7b49777cd7-fmkg6\" (UID: \"5498ab7d-3a96-4342-8670-9882b2dc5151\") " pod="openshift-authentication/oauth-openshift-7b49777cd7-fmkg6" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.312040 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/5498ab7d-3a96-4342-8670-9882b2dc5151-v4-0-config-system-service-ca\") pod \"oauth-openshift-7b49777cd7-fmkg6\" (UID: \"5498ab7d-3a96-4342-8670-9882b2dc5151\") " pod="openshift-authentication/oauth-openshift-7b49777cd7-fmkg6" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.340853 4949 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.375925 4949 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-cluster-machine-approver"/"machine-approver-tls" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.413141 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/5498ab7d-3a96-4342-8670-9882b2dc5151-v4-0-config-system-serving-cert\") pod \"oauth-openshift-7b49777cd7-fmkg6\" (UID: \"5498ab7d-3a96-4342-8670-9882b2dc5151\") " pod="openshift-authentication/oauth-openshift-7b49777cd7-fmkg6" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.413229 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5498ab7d-3a96-4342-8670-9882b2dc5151-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-7b49777cd7-fmkg6\" (UID: \"5498ab7d-3a96-4342-8670-9882b2dc5151\") " pod="openshift-authentication/oauth-openshift-7b49777cd7-fmkg6" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.413269 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/5498ab7d-3a96-4342-8670-9882b2dc5151-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-7b49777cd7-fmkg6\" (UID: \"5498ab7d-3a96-4342-8670-9882b2dc5151\") " pod="openshift-authentication/oauth-openshift-7b49777cd7-fmkg6" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.413297 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/5498ab7d-3a96-4342-8670-9882b2dc5151-v4-0-config-user-template-login\") pod \"oauth-openshift-7b49777cd7-fmkg6\" (UID: \"5498ab7d-3a96-4342-8670-9882b2dc5151\") " pod="openshift-authentication/oauth-openshift-7b49777cd7-fmkg6" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.413324 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/5498ab7d-3a96-4342-8670-9882b2dc5151-v4-0-config-system-service-ca\") pod \"oauth-openshift-7b49777cd7-fmkg6\" (UID: \"5498ab7d-3a96-4342-8670-9882b2dc5151\") " pod="openshift-authentication/oauth-openshift-7b49777cd7-fmkg6" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.413380 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/5498ab7d-3a96-4342-8670-9882b2dc5151-v4-0-config-system-router-certs\") pod \"oauth-openshift-7b49777cd7-fmkg6\" (UID: \"5498ab7d-3a96-4342-8670-9882b2dc5151\") " pod="openshift-authentication/oauth-openshift-7b49777cd7-fmkg6" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.413399 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/5498ab7d-3a96-4342-8670-9882b2dc5151-v4-0-config-system-session\") pod \"oauth-openshift-7b49777cd7-fmkg6\" (UID: \"5498ab7d-3a96-4342-8670-9882b2dc5151\") " pod="openshift-authentication/oauth-openshift-7b49777cd7-fmkg6" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.413419 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/5498ab7d-3a96-4342-8670-9882b2dc5151-v4-0-config-system-cliconfig\") pod \"oauth-openshift-7b49777cd7-fmkg6\" (UID: 
\"5498ab7d-3a96-4342-8670-9882b2dc5151\") " pod="openshift-authentication/oauth-openshift-7b49777cd7-fmkg6" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.413435 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/5498ab7d-3a96-4342-8670-9882b2dc5151-v4-0-config-user-template-error\") pod \"oauth-openshift-7b49777cd7-fmkg6\" (UID: \"5498ab7d-3a96-4342-8670-9882b2dc5151\") " pod="openshift-authentication/oauth-openshift-7b49777cd7-fmkg6" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.413450 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/5498ab7d-3a96-4342-8670-9882b2dc5151-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-7b49777cd7-fmkg6\" (UID: \"5498ab7d-3a96-4342-8670-9882b2dc5151\") " pod="openshift-authentication/oauth-openshift-7b49777cd7-fmkg6" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.413474 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/5498ab7d-3a96-4342-8670-9882b2dc5151-audit-dir\") pod \"oauth-openshift-7b49777cd7-fmkg6\" (UID: \"5498ab7d-3a96-4342-8670-9882b2dc5151\") " pod="openshift-authentication/oauth-openshift-7b49777cd7-fmkg6" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.413502 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-plsvm\" (UniqueName: \"kubernetes.io/projected/5498ab7d-3a96-4342-8670-9882b2dc5151-kube-api-access-plsvm\") pod \"oauth-openshift-7b49777cd7-fmkg6\" (UID: \"5498ab7d-3a96-4342-8670-9882b2dc5151\") " pod="openshift-authentication/oauth-openshift-7b49777cd7-fmkg6" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.413522 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/5498ab7d-3a96-4342-8670-9882b2dc5151-audit-policies\") pod \"oauth-openshift-7b49777cd7-fmkg6\" (UID: \"5498ab7d-3a96-4342-8670-9882b2dc5151\") " pod="openshift-authentication/oauth-openshift-7b49777cd7-fmkg6" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.413538 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/5498ab7d-3a96-4342-8670-9882b2dc5151-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-7b49777cd7-fmkg6\" (UID: \"5498ab7d-3a96-4342-8670-9882b2dc5151\") " pod="openshift-authentication/oauth-openshift-7b49777cd7-fmkg6" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.414651 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/5498ab7d-3a96-4342-8670-9882b2dc5151-audit-dir\") pod \"oauth-openshift-7b49777cd7-fmkg6\" (UID: \"5498ab7d-3a96-4342-8670-9882b2dc5151\") " pod="openshift-authentication/oauth-openshift-7b49777cd7-fmkg6" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.414842 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/5498ab7d-3a96-4342-8670-9882b2dc5151-v4-0-config-system-cliconfig\") pod \"oauth-openshift-7b49777cd7-fmkg6\" (UID: \"5498ab7d-3a96-4342-8670-9882b2dc5151\") " pod="openshift-authentication/oauth-openshift-7b49777cd7-fmkg6" Feb 16 11:11:30 crc 
kubenswrapper[4949]: I0216 11:11:30.415383 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/5498ab7d-3a96-4342-8670-9882b2dc5151-v4-0-config-system-service-ca\") pod \"oauth-openshift-7b49777cd7-fmkg6\" (UID: \"5498ab7d-3a96-4342-8670-9882b2dc5151\") " pod="openshift-authentication/oauth-openshift-7b49777cd7-fmkg6" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.415852 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/5498ab7d-3a96-4342-8670-9882b2dc5151-audit-policies\") pod \"oauth-openshift-7b49777cd7-fmkg6\" (UID: \"5498ab7d-3a96-4342-8670-9882b2dc5151\") " pod="openshift-authentication/oauth-openshift-7b49777cd7-fmkg6" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.416787 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5498ab7d-3a96-4342-8670-9882b2dc5151-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-7b49777cd7-fmkg6\" (UID: \"5498ab7d-3a96-4342-8670-9882b2dc5151\") " pod="openshift-authentication/oauth-openshift-7b49777cd7-fmkg6" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.421409 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/5498ab7d-3a96-4342-8670-9882b2dc5151-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-7b49777cd7-fmkg6\" (UID: \"5498ab7d-3a96-4342-8670-9882b2dc5151\") " pod="openshift-authentication/oauth-openshift-7b49777cd7-fmkg6" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.421499 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/5498ab7d-3a96-4342-8670-9882b2dc5151-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-7b49777cd7-fmkg6\" (UID: \"5498ab7d-3a96-4342-8670-9882b2dc5151\") " pod="openshift-authentication/oauth-openshift-7b49777cd7-fmkg6" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.421419 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/5498ab7d-3a96-4342-8670-9882b2dc5151-v4-0-config-user-template-login\") pod \"oauth-openshift-7b49777cd7-fmkg6\" (UID: \"5498ab7d-3a96-4342-8670-9882b2dc5151\") " pod="openshift-authentication/oauth-openshift-7b49777cd7-fmkg6" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.423036 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/5498ab7d-3a96-4342-8670-9882b2dc5151-v4-0-config-system-serving-cert\") pod \"oauth-openshift-7b49777cd7-fmkg6\" (UID: \"5498ab7d-3a96-4342-8670-9882b2dc5151\") " pod="openshift-authentication/oauth-openshift-7b49777cd7-fmkg6" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.423276 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/5498ab7d-3a96-4342-8670-9882b2dc5151-v4-0-config-system-router-certs\") pod \"oauth-openshift-7b49777cd7-fmkg6\" (UID: \"5498ab7d-3a96-4342-8670-9882b2dc5151\") " pod="openshift-authentication/oauth-openshift-7b49777cd7-fmkg6" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.423642 4949 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/5498ab7d-3a96-4342-8670-9882b2dc5151-v4-0-config-system-session\") pod \"oauth-openshift-7b49777cd7-fmkg6\" (UID: \"5498ab7d-3a96-4342-8670-9882b2dc5151\") " pod="openshift-authentication/oauth-openshift-7b49777cd7-fmkg6" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.424708 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/5498ab7d-3a96-4342-8670-9882b2dc5151-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-7b49777cd7-fmkg6\" (UID: \"5498ab7d-3a96-4342-8670-9882b2dc5151\") " pod="openshift-authentication/oauth-openshift-7b49777cd7-fmkg6" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.432304 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-plsvm\" (UniqueName: \"kubernetes.io/projected/5498ab7d-3a96-4342-8670-9882b2dc5151-kube-api-access-plsvm\") pod \"oauth-openshift-7b49777cd7-fmkg6\" (UID: \"5498ab7d-3a96-4342-8670-9882b2dc5151\") " pod="openshift-authentication/oauth-openshift-7b49777cd7-fmkg6" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.432532 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/5498ab7d-3a96-4342-8670-9882b2dc5151-v4-0-config-user-template-error\") pod \"oauth-openshift-7b49777cd7-fmkg6\" (UID: \"5498ab7d-3a96-4342-8670-9882b2dc5151\") " pod="openshift-authentication/oauth-openshift-7b49777cd7-fmkg6" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.452738 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-7b49777cd7-fmkg6" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.628962 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.719558 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.809412 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.832110 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.868976 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.871669 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-7b49777cd7-fmkg6"] Feb 16 11:11:30 crc kubenswrapper[4949]: I0216 11:11:30.991540 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Feb 16 11:11:31 crc kubenswrapper[4949]: I0216 11:11:31.062579 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Feb 16 11:11:31 crc kubenswrapper[4949]: I0216 11:11:31.140118 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Feb 16 11:11:31 crc kubenswrapper[4949]: I0216 11:11:31.200857 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Feb 16 11:11:31 crc kubenswrapper[4949]: I0216 11:11:31.225767 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Feb 16 11:11:31 crc kubenswrapper[4949]: I0216 11:11:31.245340 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5e66d330-dc75-4a98-9739-589d8df61a68" path="/var/lib/kubelet/pods/5e66d330-dc75-4a98-9739-589d8df61a68/volumes" Feb 16 11:11:31 crc kubenswrapper[4949]: I0216 11:11:31.296108 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Feb 16 11:11:31 crc kubenswrapper[4949]: I0216 11:11:31.341994 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Feb 16 11:11:31 crc kubenswrapper[4949]: I0216 11:11:31.411013 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Feb 16 11:11:31 crc kubenswrapper[4949]: I0216 11:11:31.411544 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Feb 16 11:11:31 crc kubenswrapper[4949]: I0216 11:11:31.427955 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Feb 16 11:11:31 crc kubenswrapper[4949]: I0216 11:11:31.463868 4949 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Feb 16 11:11:31 crc kubenswrapper[4949]: I0216 11:11:31.512200 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Feb 16 11:11:31 crc kubenswrapper[4949]: I0216 11:11:31.577620 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Feb 16 11:11:31 crc kubenswrapper[4949]: I0216 11:11:31.607470 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-7b49777cd7-fmkg6" event={"ID":"5498ab7d-3a96-4342-8670-9882b2dc5151","Type":"ContainerStarted","Data":"2ee0701f8a4ebf68a5ba925ac8b69d94354dba250c5968b6100e287f30f63036"} Feb 16 11:11:31 crc kubenswrapper[4949]: I0216 11:11:31.607531 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-7b49777cd7-fmkg6" event={"ID":"5498ab7d-3a96-4342-8670-9882b2dc5151","Type":"ContainerStarted","Data":"267c90ac5656e5078a68b82ac2db439ce675d8f25c59e0a87ad2f6dac13fbeeb"} Feb 16 11:11:31 crc kubenswrapper[4949]: I0216 11:11:31.607969 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-7b49777cd7-fmkg6" Feb 16 11:11:31 crc kubenswrapper[4949]: I0216 11:11:31.616332 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-7b49777cd7-fmkg6" Feb 16 11:11:31 crc kubenswrapper[4949]: I0216 11:11:31.631265 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-7b49777cd7-fmkg6" podStartSLOduration=60.631245206 podStartE2EDuration="1m0.631245206s" podCreationTimestamp="2026-02-16 11:10:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:11:31.628775784 +0000 UTC m=+281.258109969" watchObservedRunningTime="2026-02-16 11:11:31.631245206 +0000 UTC m=+281.260579371" Feb 16 11:11:31 crc kubenswrapper[4949]: I0216 11:11:31.639996 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Feb 16 11:11:31 crc kubenswrapper[4949]: I0216 11:11:31.758763 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Feb 16 11:11:31 crc kubenswrapper[4949]: I0216 11:11:31.759708 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Feb 16 11:11:31 crc kubenswrapper[4949]: I0216 11:11:31.760965 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Feb 16 11:11:31 crc kubenswrapper[4949]: I0216 11:11:31.772941 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Feb 16 11:11:31 crc kubenswrapper[4949]: I0216 11:11:31.787083 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Feb 16 11:11:31 crc kubenswrapper[4949]: I0216 11:11:31.871976 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Feb 16 11:11:31 crc kubenswrapper[4949]: I0216 
11:11:31.973582 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Feb 16 11:11:31 crc kubenswrapper[4949]: I0216 11:11:31.981277 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Feb 16 11:11:32 crc kubenswrapper[4949]: I0216 11:11:32.124761 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Feb 16 11:11:32 crc kubenswrapper[4949]: I0216 11:11:32.161961 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Feb 16 11:11:32 crc kubenswrapper[4949]: I0216 11:11:32.245652 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Feb 16 11:11:32 crc kubenswrapper[4949]: I0216 11:11:32.454958 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Feb 16 11:11:32 crc kubenswrapper[4949]: I0216 11:11:32.629010 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Feb 16 11:11:32 crc kubenswrapper[4949]: I0216 11:11:32.671348 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Feb 16 11:11:32 crc kubenswrapper[4949]: I0216 11:11:32.707049 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Feb 16 11:11:32 crc kubenswrapper[4949]: I0216 11:11:32.777124 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Feb 16 11:11:32 crc kubenswrapper[4949]: I0216 11:11:32.853748 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Feb 16 11:11:32 crc kubenswrapper[4949]: I0216 11:11:32.908478 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Feb 16 11:11:32 crc kubenswrapper[4949]: I0216 11:11:32.922021 4949 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Feb 16 11:11:32 crc kubenswrapper[4949]: I0216 11:11:32.922330 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://d3bc4461237a46bc4cf1654e7d5759f48b9c5b2756d32edc2e1874ac4c00de36" gracePeriod=5 Feb 16 11:11:32 crc kubenswrapper[4949]: I0216 11:11:32.972957 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Feb 16 11:11:32 crc kubenswrapper[4949]: I0216 11:11:32.995024 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Feb 16 11:11:33 crc kubenswrapper[4949]: I0216 11:11:33.009006 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Feb 16 11:11:33 crc kubenswrapper[4949]: I0216 11:11:33.148797 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Feb 16 11:11:33 crc kubenswrapper[4949]: I0216 11:11:33.155770 4949 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Feb 16 11:11:33 crc kubenswrapper[4949]: I0216 11:11:33.213607 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Feb 16 11:11:33 crc kubenswrapper[4949]: I0216 11:11:33.319507 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Feb 16 11:11:33 crc kubenswrapper[4949]: I0216 11:11:33.330362 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Feb 16 11:11:33 crc kubenswrapper[4949]: I0216 11:11:33.487160 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Feb 16 11:11:33 crc kubenswrapper[4949]: I0216 11:11:33.531981 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Feb 16 11:11:33 crc kubenswrapper[4949]: I0216 11:11:33.819400 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Feb 16 11:11:33 crc kubenswrapper[4949]: I0216 11:11:33.919804 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Feb 16 11:11:33 crc kubenswrapper[4949]: I0216 11:11:33.921834 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Feb 16 11:11:34 crc kubenswrapper[4949]: I0216 11:11:34.014332 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Feb 16 11:11:34 crc kubenswrapper[4949]: I0216 11:11:34.061719 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Feb 16 11:11:34 crc kubenswrapper[4949]: I0216 11:11:34.085443 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Feb 16 11:11:34 crc kubenswrapper[4949]: I0216 11:11:34.091310 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Feb 16 11:11:34 crc kubenswrapper[4949]: I0216 11:11:34.141239 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Feb 16 11:11:34 crc kubenswrapper[4949]: I0216 11:11:34.197009 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Feb 16 11:11:34 crc kubenswrapper[4949]: I0216 11:11:34.235129 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Feb 16 11:11:34 crc kubenswrapper[4949]: I0216 11:11:34.563135 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Feb 16 11:11:34 crc kubenswrapper[4949]: I0216 11:11:34.629512 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Feb 16 11:11:34 crc kubenswrapper[4949]: I0216 11:11:34.673351 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Feb 16 11:11:34 
crc kubenswrapper[4949]: I0216 11:11:34.718055 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Feb 16 11:11:34 crc kubenswrapper[4949]: I0216 11:11:34.951496 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Feb 16 11:11:34 crc kubenswrapper[4949]: I0216 11:11:34.987695 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Feb 16 11:11:35 crc kubenswrapper[4949]: I0216 11:11:35.139004 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Feb 16 11:11:35 crc kubenswrapper[4949]: I0216 11:11:35.143083 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Feb 16 11:11:35 crc kubenswrapper[4949]: I0216 11:11:35.231469 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Feb 16 11:11:35 crc kubenswrapper[4949]: I0216 11:11:35.284049 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Feb 16 11:11:35 crc kubenswrapper[4949]: I0216 11:11:35.308751 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Feb 16 11:11:35 crc kubenswrapper[4949]: I0216 11:11:35.426461 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Feb 16 11:11:35 crc kubenswrapper[4949]: I0216 11:11:35.441568 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Feb 16 11:11:35 crc kubenswrapper[4949]: I0216 11:11:35.443704 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Feb 16 11:11:35 crc kubenswrapper[4949]: I0216 11:11:35.451028 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Feb 16 11:11:35 crc kubenswrapper[4949]: I0216 11:11:35.490052 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Feb 16 11:11:35 crc kubenswrapper[4949]: I0216 11:11:35.604873 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Feb 16 11:11:35 crc kubenswrapper[4949]: I0216 11:11:35.901512 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Feb 16 11:11:35 crc kubenswrapper[4949]: I0216 11:11:35.922905 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Feb 16 11:11:35 crc kubenswrapper[4949]: I0216 11:11:35.970109 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Feb 16 11:11:38 crc kubenswrapper[4949]: I0216 11:11:38.057609 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Feb 16 11:11:38 crc kubenswrapper[4949]: I0216 11:11:38.057712 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 16 11:11:38 crc kubenswrapper[4949]: I0216 11:11:38.221523 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Feb 16 11:11:38 crc kubenswrapper[4949]: I0216 11:11:38.221792 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Feb 16 11:11:38 crc kubenswrapper[4949]: I0216 11:11:38.221849 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Feb 16 11:11:38 crc kubenswrapper[4949]: I0216 11:11:38.221923 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Feb 16 11:11:38 crc kubenswrapper[4949]: I0216 11:11:38.222064 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Feb 16 11:11:38 crc kubenswrapper[4949]: I0216 11:11:38.222137 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 16 11:11:38 crc kubenswrapper[4949]: I0216 11:11:38.222152 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 16 11:11:38 crc kubenswrapper[4949]: I0216 11:11:38.222338 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 16 11:11:38 crc kubenswrapper[4949]: I0216 11:11:38.222294 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 16 11:11:38 crc kubenswrapper[4949]: I0216 11:11:38.222863 4949 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Feb 16 11:11:38 crc kubenswrapper[4949]: I0216 11:11:38.222884 4949 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Feb 16 11:11:38 crc kubenswrapper[4949]: I0216 11:11:38.222899 4949 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Feb 16 11:11:38 crc kubenswrapper[4949]: I0216 11:11:38.222912 4949 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Feb 16 11:11:38 crc kubenswrapper[4949]: I0216 11:11:38.235343 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 16 11:11:38 crc kubenswrapper[4949]: I0216 11:11:38.323710 4949 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Feb 16 11:11:38 crc kubenswrapper[4949]: I0216 11:11:38.649317 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Feb 16 11:11:38 crc kubenswrapper[4949]: I0216 11:11:38.649374 4949 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="d3bc4461237a46bc4cf1654e7d5759f48b9c5b2756d32edc2e1874ac4c00de36" exitCode=137 Feb 16 11:11:38 crc kubenswrapper[4949]: I0216 11:11:38.649418 4949 scope.go:117] "RemoveContainer" containerID="d3bc4461237a46bc4cf1654e7d5759f48b9c5b2756d32edc2e1874ac4c00de36" Feb 16 11:11:38 crc kubenswrapper[4949]: I0216 11:11:38.649544 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 16 11:11:38 crc kubenswrapper[4949]: I0216 11:11:38.679437 4949 scope.go:117] "RemoveContainer" containerID="d3bc4461237a46bc4cf1654e7d5759f48b9c5b2756d32edc2e1874ac4c00de36" Feb 16 11:11:38 crc kubenswrapper[4949]: E0216 11:11:38.679985 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d3bc4461237a46bc4cf1654e7d5759f48b9c5b2756d32edc2e1874ac4c00de36\": container with ID starting with d3bc4461237a46bc4cf1654e7d5759f48b9c5b2756d32edc2e1874ac4c00de36 not found: ID does not exist" containerID="d3bc4461237a46bc4cf1654e7d5759f48b9c5b2756d32edc2e1874ac4c00de36" Feb 16 11:11:38 crc kubenswrapper[4949]: I0216 11:11:38.680191 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d3bc4461237a46bc4cf1654e7d5759f48b9c5b2756d32edc2e1874ac4c00de36"} err="failed to get container status \"d3bc4461237a46bc4cf1654e7d5759f48b9c5b2756d32edc2e1874ac4c00de36\": rpc error: code = NotFound desc = could not find container \"d3bc4461237a46bc4cf1654e7d5759f48b9c5b2756d32edc2e1874ac4c00de36\": container with ID starting with d3bc4461237a46bc4cf1654e7d5759f48b9c5b2756d32edc2e1874ac4c00de36 not found: ID does not exist" Feb 16 11:11:39 crc kubenswrapper[4949]: I0216 11:11:39.246871 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Feb 16 11:11:39 crc kubenswrapper[4949]: I0216 11:11:39.248601 4949 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="" Feb 16 11:11:39 crc kubenswrapper[4949]: I0216 11:11:39.262305 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Feb 16 11:11:39 crc kubenswrapper[4949]: I0216 11:11:39.262361 4949 kubelet.go:2649] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="ec1e50f1-119d-4899-bb16-11df813add28" Feb 16 11:11:39 crc kubenswrapper[4949]: I0216 11:11:39.265747 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Feb 16 11:11:39 crc kubenswrapper[4949]: I0216 11:11:39.265806 4949 kubelet.go:2673] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="ec1e50f1-119d-4899-bb16-11df813add28" Feb 16 11:11:48 crc kubenswrapper[4949]: I0216 11:11:48.092518 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Feb 16 11:11:50 crc kubenswrapper[4949]: I0216 11:11:50.974922 4949 cert_rotation.go:91] certificate rotation detected, shutting down client connections to start using new credentials Feb 16 11:11:51 crc kubenswrapper[4949]: I0216 11:11:51.745592 4949 generic.go:334] "Generic (PLEG): container finished" podID="f50a54ec-5563-4d56-8639-86a6003e0b0e" containerID="42785c54bf371ca60d66c60914237b6ed75f54d5445b7302272c911ffd6ee7c1" exitCode=0 Feb 16 11:11:51 crc kubenswrapper[4949]: I0216 11:11:51.745696 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-khb7r" 
event={"ID":"f50a54ec-5563-4d56-8639-86a6003e0b0e","Type":"ContainerDied","Data":"42785c54bf371ca60d66c60914237b6ed75f54d5445b7302272c911ffd6ee7c1"} Feb 16 11:11:51 crc kubenswrapper[4949]: I0216 11:11:51.748208 4949 scope.go:117] "RemoveContainer" containerID="42785c54bf371ca60d66c60914237b6ed75f54d5445b7302272c911ffd6ee7c1" Feb 16 11:11:52 crc kubenswrapper[4949]: I0216 11:11:52.755626 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-khb7r" event={"ID":"f50a54ec-5563-4d56-8639-86a6003e0b0e","Type":"ContainerStarted","Data":"e5ec96a119aa938b436e6809845b345b1d757e4259f34bacf7d298a0f05313a0"} Feb 16 11:11:52 crc kubenswrapper[4949]: I0216 11:11:52.756256 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-khb7r" Feb 16 11:11:52 crc kubenswrapper[4949]: I0216 11:11:52.758451 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-khb7r" Feb 16 11:11:55 crc kubenswrapper[4949]: I0216 11:11:55.626082 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Feb 16 11:11:55 crc kubenswrapper[4949]: I0216 11:11:55.788558 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/1.log" Feb 16 11:11:55 crc kubenswrapper[4949]: I0216 11:11:55.790603 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Feb 16 11:11:55 crc kubenswrapper[4949]: I0216 11:11:55.790694 4949 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="f2db7eaefab4e9796abba64513a3e9e4ae09fc9aab222cad4b27257a62f8ee4f" exitCode=137 Feb 16 11:11:55 crc kubenswrapper[4949]: I0216 11:11:55.790743 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"f2db7eaefab4e9796abba64513a3e9e4ae09fc9aab222cad4b27257a62f8ee4f"} Feb 16 11:11:55 crc kubenswrapper[4949]: I0216 11:11:55.790789 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"7e0d4ee4b5924fcb394a971ead0fe52255e0466497267f77ac43abede242adb7"} Feb 16 11:11:55 crc kubenswrapper[4949]: I0216 11:11:55.790815 4949 scope.go:117] "RemoveContainer" containerID="68519f3c9bfc45ad28f92e2cf0c28a9413821784aafd91ab65a311259ed6ecf6" Feb 16 11:11:56 crc kubenswrapper[4949]: I0216 11:11:56.798381 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/1.log" Feb 16 11:11:56 crc kubenswrapper[4949]: I0216 11:11:56.986282 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 16 11:11:58 crc kubenswrapper[4949]: I0216 11:11:58.798790 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Feb 16 11:12:01 crc kubenswrapper[4949]: I0216 11:12:01.499266 
4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Feb 16 11:12:01 crc kubenswrapper[4949]: I0216 11:12:01.692027 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Feb 16 11:12:02 crc kubenswrapper[4949]: I0216 11:12:02.775276 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Feb 16 11:12:04 crc kubenswrapper[4949]: I0216 11:12:04.777194 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 16 11:12:04 crc kubenswrapper[4949]: I0216 11:12:04.781217 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 16 11:12:06 crc kubenswrapper[4949]: I0216 11:12:06.990453 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 16 11:12:09 crc kubenswrapper[4949]: I0216 11:12:09.782459 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Feb 16 11:13:04 crc kubenswrapper[4949]: I0216 11:13:04.550619 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 16 11:13:04 crc kubenswrapper[4949]: I0216 11:13:04.551532 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 16 11:13:06 crc kubenswrapper[4949]: I0216 11:13:06.654668 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-w4598"] Feb 16 11:13:06 crc kubenswrapper[4949]: I0216 11:13:06.656766 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-w4598" podUID="73a35f90-b2ec-4518-b927-844e164a8531" containerName="registry-server" containerID="cri-o://aaebc216754e07cd5639d6445177d38893a419204e2b54619ef24ac626951f77" gracePeriod=30 Feb 16 11:13:06 crc kubenswrapper[4949]: I0216 11:13:06.678600 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-fcdq5"] Feb 16 11:13:06 crc kubenswrapper[4949]: I0216 11:13:06.691875 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-fcdq5" podUID="37cad064-c760-43e0-8a5c-fb66fc774246" containerName="registry-server" containerID="cri-o://9c0bacc6281fd8a737b14024b78bfa4ca1732dc029e44a38f2cac8fa563bb387" gracePeriod=30 Feb 16 11:13:06 crc kubenswrapper[4949]: I0216 11:13:06.699050 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-khb7r"] Feb 16 11:13:06 crc kubenswrapper[4949]: I0216 11:13:06.710060 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-c2gtt"] Feb 16 11:13:06 crc kubenswrapper[4949]: I0216 11:13:06.710455 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-c2gtt" podUID="19e05ba4-d60e-479d-ae62-6853917d7537" containerName="registry-server" containerID="cri-o://96d4428a669d625cd4c8d0f0b96bb5d6ad767855fd175562aa80a701be2bdf96" gracePeriod=30
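[Editorial note, not journal output] The "Probe failed" pair above shows the kubelet's HTTP liveness probe against machine-config-daemon getting "connection refused" on http://127.0.0.1:8798/health; once a liveness probe fails failureThreshold times in a row (three by default), the kubelet kills and restarts the container. A minimal sketch of the same style of check, assuming the endpoint and a one-second timeout from the log entry; kubelet's HTTP prober counts any status in [200,400) as success:

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

// probe performs one HTTP health check in the style of a kubelet
// liveness probe: transport errors and non-2xx/3xx statuses both fail.
func probe(url string, timeout time.Duration) error {
	client := &http.Client{Timeout: timeout}
	resp, err := client.Get(url)
	if err != nil {
		// e.g. "dial tcp 127.0.0.1:8798: connect: connection refused",
		// exactly the failure reported in the entries above.
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 200 && resp.StatusCode < 400 {
		return nil
	}
	return fmt.Errorf("unhealthy status %d", resp.StatusCode)
}

func main() {
	if err := probe("http://127.0.0.1:8798/health", time.Second); err != nil {
		fmt.Println("Probe failed:", err)
	} else {
		fmt.Println("healthy")
	}
}
```

The marketplace "SyncLoop DELETE" / "Killing container with a grace period" entries that surround the probe failure are a separate episode: the catalog pods are being torn down with gracePeriod=30 as their replacement operator pod is created below.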
kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-c2gtt" podUID="19e05ba4-d60e-479d-ae62-6853917d7537" containerName="registry-server" containerID="cri-o://96d4428a669d625cd4c8d0f0b96bb5d6ad767855fd175562aa80a701be2bdf96" gracePeriod=30 Feb 16 11:13:06 crc kubenswrapper[4949]: I0216 11:13:06.720263 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2jvc5"] Feb 16 11:13:06 crc kubenswrapper[4949]: I0216 11:13:06.720723 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-2jvc5" podUID="c2716c39-f511-47e9-a400-94cb1cd5ba42" containerName="registry-server" containerID="cri-o://cb42da8c1eb59adbd159486be5a1eaf6b2f00c071c75f05e63da4f0401ba75bf" gracePeriod=30 Feb 16 11:13:06 crc kubenswrapper[4949]: I0216 11:13:06.728800 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-pp4fp"] Feb 16 11:13:06 crc kubenswrapper[4949]: E0216 11:13:06.729501 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Feb 16 11:13:06 crc kubenswrapper[4949]: I0216 11:13:06.729519 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Feb 16 11:13:06 crc kubenswrapper[4949]: I0216 11:13:06.729764 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Feb 16 11:13:06 crc kubenswrapper[4949]: I0216 11:13:06.730388 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-pp4fp" Feb 16 11:13:06 crc kubenswrapper[4949]: I0216 11:13:06.739439 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-pp4fp"] Feb 16 11:13:06 crc kubenswrapper[4949]: I0216 11:13:06.826483 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g6jmc\" (UniqueName: \"kubernetes.io/projected/80cb28ef-0fde-4224-84e3-df2e1ca5ffce-kube-api-access-g6jmc\") pod \"marketplace-operator-79b997595-pp4fp\" (UID: \"80cb28ef-0fde-4224-84e3-df2e1ca5ffce\") " pod="openshift-marketplace/marketplace-operator-79b997595-pp4fp" Feb 16 11:13:06 crc kubenswrapper[4949]: I0216 11:13:06.826928 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/80cb28ef-0fde-4224-84e3-df2e1ca5ffce-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-pp4fp\" (UID: \"80cb28ef-0fde-4224-84e3-df2e1ca5ffce\") " pod="openshift-marketplace/marketplace-operator-79b997595-pp4fp" Feb 16 11:13:06 crc kubenswrapper[4949]: I0216 11:13:06.826971 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/80cb28ef-0fde-4224-84e3-df2e1ca5ffce-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-pp4fp\" (UID: \"80cb28ef-0fde-4224-84e3-df2e1ca5ffce\") " pod="openshift-marketplace/marketplace-operator-79b997595-pp4fp" Feb 16 11:13:06 crc kubenswrapper[4949]: I0216 11:13:06.929806 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/80cb28ef-0fde-4224-84e3-df2e1ca5ffce-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-pp4fp\" (UID: \"80cb28ef-0fde-4224-84e3-df2e1ca5ffce\") " pod="openshift-marketplace/marketplace-operator-79b997595-pp4fp" Feb 16 11:13:06 crc kubenswrapper[4949]: I0216 11:13:06.929862 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g6jmc\" (UniqueName: \"kubernetes.io/projected/80cb28ef-0fde-4224-84e3-df2e1ca5ffce-kube-api-access-g6jmc\") pod \"marketplace-operator-79b997595-pp4fp\" (UID: \"80cb28ef-0fde-4224-84e3-df2e1ca5ffce\") " pod="openshift-marketplace/marketplace-operator-79b997595-pp4fp" Feb 16 11:13:06 crc kubenswrapper[4949]: I0216 11:13:06.929897 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/80cb28ef-0fde-4224-84e3-df2e1ca5ffce-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-pp4fp\" (UID: \"80cb28ef-0fde-4224-84e3-df2e1ca5ffce\") " pod="openshift-marketplace/marketplace-operator-79b997595-pp4fp" Feb 16 11:13:06 crc kubenswrapper[4949]: I0216 11:13:06.931315 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/80cb28ef-0fde-4224-84e3-df2e1ca5ffce-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-pp4fp\" (UID: \"80cb28ef-0fde-4224-84e3-df2e1ca5ffce\") " pod="openshift-marketplace/marketplace-operator-79b997595-pp4fp" Feb 16 11:13:06 crc kubenswrapper[4949]: I0216 11:13:06.940893 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/80cb28ef-0fde-4224-84e3-df2e1ca5ffce-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-pp4fp\" (UID: \"80cb28ef-0fde-4224-84e3-df2e1ca5ffce\") " pod="openshift-marketplace/marketplace-operator-79b997595-pp4fp" Feb 16 11:13:06 crc kubenswrapper[4949]: I0216 11:13:06.954542 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g6jmc\" (UniqueName: \"kubernetes.io/projected/80cb28ef-0fde-4224-84e3-df2e1ca5ffce-kube-api-access-g6jmc\") pod \"marketplace-operator-79b997595-pp4fp\" (UID: \"80cb28ef-0fde-4224-84e3-df2e1ca5ffce\") " pod="openshift-marketplace/marketplace-operator-79b997595-pp4fp" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.058100 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-pp4fp" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.065258 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-w4598" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.069587 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-c2gtt" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.127726 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-2jvc5" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.144624 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/73a35f90-b2ec-4518-b927-844e164a8531-utilities\") pod \"73a35f90-b2ec-4518-b927-844e164a8531\" (UID: \"73a35f90-b2ec-4518-b927-844e164a8531\") " Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.144667 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-js2w6\" (UniqueName: \"kubernetes.io/projected/c2716c39-f511-47e9-a400-94cb1cd5ba42-kube-api-access-js2w6\") pod \"c2716c39-f511-47e9-a400-94cb1cd5ba42\" (UID: \"c2716c39-f511-47e9-a400-94cb1cd5ba42\") " Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.144692 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5mdzf\" (UniqueName: \"kubernetes.io/projected/73a35f90-b2ec-4518-b927-844e164a8531-kube-api-access-5mdzf\") pod \"73a35f90-b2ec-4518-b927-844e164a8531\" (UID: \"73a35f90-b2ec-4518-b927-844e164a8531\") " Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.144719 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/73a35f90-b2ec-4518-b927-844e164a8531-catalog-content\") pod \"73a35f90-b2ec-4518-b927-844e164a8531\" (UID: \"73a35f90-b2ec-4518-b927-844e164a8531\") " Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.148334 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/73a35f90-b2ec-4518-b927-844e164a8531-utilities" (OuterVolumeSpecName: "utilities") pod "73a35f90-b2ec-4518-b927-844e164a8531" (UID: "73a35f90-b2ec-4518-b927-844e164a8531"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.149229 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5hn2z\" (UniqueName: \"kubernetes.io/projected/19e05ba4-d60e-479d-ae62-6853917d7537-kube-api-access-5hn2z\") pod \"19e05ba4-d60e-479d-ae62-6853917d7537\" (UID: \"19e05ba4-d60e-479d-ae62-6853917d7537\") " Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.149462 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c2716c39-f511-47e9-a400-94cb1cd5ba42-utilities\") pod \"c2716c39-f511-47e9-a400-94cb1cd5ba42\" (UID: \"c2716c39-f511-47e9-a400-94cb1cd5ba42\") " Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.149488 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/19e05ba4-d60e-479d-ae62-6853917d7537-utilities\") pod \"19e05ba4-d60e-479d-ae62-6853917d7537\" (UID: \"19e05ba4-d60e-479d-ae62-6853917d7537\") " Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.149567 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c2716c39-f511-47e9-a400-94cb1cd5ba42-catalog-content\") pod \"c2716c39-f511-47e9-a400-94cb1cd5ba42\" (UID: \"c2716c39-f511-47e9-a400-94cb1cd5ba42\") " Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.149686 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/19e05ba4-d60e-479d-ae62-6853917d7537-catalog-content\") pod \"19e05ba4-d60e-479d-ae62-6853917d7537\" (UID: \"19e05ba4-d60e-479d-ae62-6853917d7537\") " Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.153331 4949 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/73a35f90-b2ec-4518-b927-844e164a8531-utilities\") on node \"crc\" DevicePath \"\"" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.156792 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c2716c39-f511-47e9-a400-94cb1cd5ba42-utilities" (OuterVolumeSpecName: "utilities") pod "c2716c39-f511-47e9-a400-94cb1cd5ba42" (UID: "c2716c39-f511-47e9-a400-94cb1cd5ba42"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.157048 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c2716c39-f511-47e9-a400-94cb1cd5ba42-kube-api-access-js2w6" (OuterVolumeSpecName: "kube-api-access-js2w6") pod "c2716c39-f511-47e9-a400-94cb1cd5ba42" (UID: "c2716c39-f511-47e9-a400-94cb1cd5ba42"). InnerVolumeSpecName "kube-api-access-js2w6". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.157121 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/73a35f90-b2ec-4518-b927-844e164a8531-kube-api-access-5mdzf" (OuterVolumeSpecName: "kube-api-access-5mdzf") pod "73a35f90-b2ec-4518-b927-844e164a8531" (UID: "73a35f90-b2ec-4518-b927-844e164a8531"). InnerVolumeSpecName "kube-api-access-5mdzf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.157908 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/19e05ba4-d60e-479d-ae62-6853917d7537-utilities" (OuterVolumeSpecName: "utilities") pod "19e05ba4-d60e-479d-ae62-6853917d7537" (UID: "19e05ba4-d60e-479d-ae62-6853917d7537"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.165255 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/19e05ba4-d60e-479d-ae62-6853917d7537-kube-api-access-5hn2z" (OuterVolumeSpecName: "kube-api-access-5hn2z") pod "19e05ba4-d60e-479d-ae62-6853917d7537" (UID: "19e05ba4-d60e-479d-ae62-6853917d7537"). InnerVolumeSpecName "kube-api-access-5hn2z". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.174515 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fcdq5" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.207679 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/19e05ba4-d60e-479d-ae62-6853917d7537-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "19e05ba4-d60e-479d-ae62-6853917d7537" (UID: "19e05ba4-d60e-479d-ae62-6853917d7537"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.230327 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/73a35f90-b2ec-4518-b927-844e164a8531-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "73a35f90-b2ec-4518-b927-844e164a8531" (UID: "73a35f90-b2ec-4518-b927-844e164a8531"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.249589 4949 generic.go:334] "Generic (PLEG): container finished" podID="73a35f90-b2ec-4518-b927-844e164a8531" containerID="aaebc216754e07cd5639d6445177d38893a419204e2b54619ef24ac626951f77" exitCode=0 Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.249757 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-w4598" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.253940 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/37cad064-c760-43e0-8a5c-fb66fc774246-utilities\") pod \"37cad064-c760-43e0-8a5c-fb66fc774246\" (UID: \"37cad064-c760-43e0-8a5c-fb66fc774246\") " Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.254007 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/37cad064-c760-43e0-8a5c-fb66fc774246-catalog-content\") pod \"37cad064-c760-43e0-8a5c-fb66fc774246\" (UID: \"37cad064-c760-43e0-8a5c-fb66fc774246\") " Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.254158 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jtvnm\" (UniqueName: \"kubernetes.io/projected/37cad064-c760-43e0-8a5c-fb66fc774246-kube-api-access-jtvnm\") pod \"37cad064-c760-43e0-8a5c-fb66fc774246\" (UID: \"37cad064-c760-43e0-8a5c-fb66fc774246\") " Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.254441 4949 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c2716c39-f511-47e9-a400-94cb1cd5ba42-utilities\") on node \"crc\" DevicePath \"\"" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.254463 4949 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/19e05ba4-d60e-479d-ae62-6853917d7537-utilities\") on node \"crc\" DevicePath \"\"" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.254473 4949 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/19e05ba4-d60e-479d-ae62-6853917d7537-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.254482 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-js2w6\" (UniqueName: \"kubernetes.io/projected/c2716c39-f511-47e9-a400-94cb1cd5ba42-kube-api-access-js2w6\") on node \"crc\" DevicePath \"\"" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.254494 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5mdzf\" (UniqueName: \"kubernetes.io/projected/73a35f90-b2ec-4518-b927-844e164a8531-kube-api-access-5mdzf\") on node \"crc\" DevicePath \"\"" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.254504 4949 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/73a35f90-b2ec-4518-b927-844e164a8531-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.254513 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5hn2z\" (UniqueName: \"kubernetes.io/projected/19e05ba4-d60e-479d-ae62-6853917d7537-kube-api-access-5hn2z\") on node \"crc\" DevicePath \"\"" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.255373 4949 generic.go:334] "Generic (PLEG): container finished" podID="19e05ba4-d60e-479d-ae62-6853917d7537" containerID="96d4428a669d625cd4c8d0f0b96bb5d6ad767855fd175562aa80a701be2bdf96" exitCode=0 Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.255436 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/37cad064-c760-43e0-8a5c-fb66fc774246-utilities" (OuterVolumeSpecName: "utilities") 
pod "37cad064-c760-43e0-8a5c-fb66fc774246" (UID: "37cad064-c760-43e0-8a5c-fb66fc774246"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.255560 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-c2gtt" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.260773 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/37cad064-c760-43e0-8a5c-fb66fc774246-kube-api-access-jtvnm" (OuterVolumeSpecName: "kube-api-access-jtvnm") pod "37cad064-c760-43e0-8a5c-fb66fc774246" (UID: "37cad064-c760-43e0-8a5c-fb66fc774246"). InnerVolumeSpecName "kube-api-access-jtvnm". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.262783 4949 generic.go:334] "Generic (PLEG): container finished" podID="c2716c39-f511-47e9-a400-94cb1cd5ba42" containerID="cb42da8c1eb59adbd159486be5a1eaf6b2f00c071c75f05e63da4f0401ba75bf" exitCode=0 Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.262917 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2jvc5" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.267726 4949 generic.go:334] "Generic (PLEG): container finished" podID="37cad064-c760-43e0-8a5c-fb66fc774246" containerID="9c0bacc6281fd8a737b14024b78bfa4ca1732dc029e44a38f2cac8fa563bb387" exitCode=0 Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.267893 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fcdq5" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.268049 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-khb7r" podUID="f50a54ec-5563-4d56-8639-86a6003e0b0e" containerName="marketplace-operator" containerID="cri-o://e5ec96a119aa938b436e6809845b345b1d757e4259f34bacf7d298a0f05313a0" gracePeriod=30 Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.317209 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w4598" event={"ID":"73a35f90-b2ec-4518-b927-844e164a8531","Type":"ContainerDied","Data":"aaebc216754e07cd5639d6445177d38893a419204e2b54619ef24ac626951f77"} Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.317259 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w4598" event={"ID":"73a35f90-b2ec-4518-b927-844e164a8531","Type":"ContainerDied","Data":"ada7bdc9cbafda6faf3dec2384e7005bc52889809c1f57d378e5b237b78412af"} Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.317276 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c2gtt" event={"ID":"19e05ba4-d60e-479d-ae62-6853917d7537","Type":"ContainerDied","Data":"96d4428a669d625cd4c8d0f0b96bb5d6ad767855fd175562aa80a701be2bdf96"} Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.317290 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c2gtt" event={"ID":"19e05ba4-d60e-479d-ae62-6853917d7537","Type":"ContainerDied","Data":"1972a4e79f1728013337bf3616f75cd0c1ff61c6b2230cfb331830fa04abb675"} Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.317299 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-2jvc5" event={"ID":"c2716c39-f511-47e9-a400-94cb1cd5ba42","Type":"ContainerDied","Data":"cb42da8c1eb59adbd159486be5a1eaf6b2f00c071c75f05e63da4f0401ba75bf"} Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.317315 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2jvc5" event={"ID":"c2716c39-f511-47e9-a400-94cb1cd5ba42","Type":"ContainerDied","Data":"7e8cf975ca616732517d61ab3753ca67da4d2472dfe92eb384bbb22efa0c8686"} Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.317330 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fcdq5" event={"ID":"37cad064-c760-43e0-8a5c-fb66fc774246","Type":"ContainerDied","Data":"9c0bacc6281fd8a737b14024b78bfa4ca1732dc029e44a38f2cac8fa563bb387"} Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.317343 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fcdq5" event={"ID":"37cad064-c760-43e0-8a5c-fb66fc774246","Type":"ContainerDied","Data":"155a6dad4e1e4948b44ad4746ab755f15e081bca437333b9daa0e990cff48143"} Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.317367 4949 scope.go:117] "RemoveContainer" containerID="aaebc216754e07cd5639d6445177d38893a419204e2b54619ef24ac626951f77" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.321288 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-c2gtt"] Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.324778 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/37cad064-c760-43e0-8a5c-fb66fc774246-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "37cad064-c760-43e0-8a5c-fb66fc774246" (UID: "37cad064-c760-43e0-8a5c-fb66fc774246"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.334245 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-c2gtt"] Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.339878 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-pp4fp"] Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.346555 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-w4598"] Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.349975 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-w4598"] Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.356568 4949 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/37cad064-c760-43e0-8a5c-fb66fc774246-utilities\") on node \"crc\" DevicePath \"\"" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.356747 4949 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/37cad064-c760-43e0-8a5c-fb66fc774246-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.357278 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jtvnm\" (UniqueName: \"kubernetes.io/projected/37cad064-c760-43e0-8a5c-fb66fc774246-kube-api-access-jtvnm\") on node \"crc\" DevicePath \"\"" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.359547 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c2716c39-f511-47e9-a400-94cb1cd5ba42-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c2716c39-f511-47e9-a400-94cb1cd5ba42" (UID: "c2716c39-f511-47e9-a400-94cb1cd5ba42"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.360919 4949 scope.go:117] "RemoveContainer" containerID="2601e166a6f735b5b2389c88eaa87a832c3bdb4bc6b262645c3b4569f1058d35" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.443086 4949 scope.go:117] "RemoveContainer" containerID="1ebf5ef8e503da4de58a111a7a9586e39292b7eda7d29f3183a92e9469ba16a9" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.459449 4949 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c2716c39-f511-47e9-a400-94cb1cd5ba42-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.475802 4949 scope.go:117] "RemoveContainer" containerID="aaebc216754e07cd5639d6445177d38893a419204e2b54619ef24ac626951f77" Feb 16 11:13:07 crc kubenswrapper[4949]: E0216 11:13:07.476241 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aaebc216754e07cd5639d6445177d38893a419204e2b54619ef24ac626951f77\": container with ID starting with aaebc216754e07cd5639d6445177d38893a419204e2b54619ef24ac626951f77 not found: ID does not exist" containerID="aaebc216754e07cd5639d6445177d38893a419204e2b54619ef24ac626951f77" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.476268 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aaebc216754e07cd5639d6445177d38893a419204e2b54619ef24ac626951f77"} err="failed to get container status \"aaebc216754e07cd5639d6445177d38893a419204e2b54619ef24ac626951f77\": rpc error: code = NotFound desc = could not find container \"aaebc216754e07cd5639d6445177d38893a419204e2b54619ef24ac626951f77\": container with ID starting with aaebc216754e07cd5639d6445177d38893a419204e2b54619ef24ac626951f77 not found: ID does not exist" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.476290 4949 scope.go:117] "RemoveContainer" containerID="2601e166a6f735b5b2389c88eaa87a832c3bdb4bc6b262645c3b4569f1058d35" Feb 16 11:13:07 crc kubenswrapper[4949]: E0216 11:13:07.476509 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2601e166a6f735b5b2389c88eaa87a832c3bdb4bc6b262645c3b4569f1058d35\": container with ID starting with 2601e166a6f735b5b2389c88eaa87a832c3bdb4bc6b262645c3b4569f1058d35 not found: ID does not exist" containerID="2601e166a6f735b5b2389c88eaa87a832c3bdb4bc6b262645c3b4569f1058d35" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.476557 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2601e166a6f735b5b2389c88eaa87a832c3bdb4bc6b262645c3b4569f1058d35"} err="failed to get container status \"2601e166a6f735b5b2389c88eaa87a832c3bdb4bc6b262645c3b4569f1058d35\": rpc error: code = NotFound desc = could not find container \"2601e166a6f735b5b2389c88eaa87a832c3bdb4bc6b262645c3b4569f1058d35\": container with ID starting with 2601e166a6f735b5b2389c88eaa87a832c3bdb4bc6b262645c3b4569f1058d35 not found: ID does not exist" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.476570 4949 scope.go:117] "RemoveContainer" containerID="1ebf5ef8e503da4de58a111a7a9586e39292b7eda7d29f3183a92e9469ba16a9" Feb 16 11:13:07 crc kubenswrapper[4949]: E0216 11:13:07.476812 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"1ebf5ef8e503da4de58a111a7a9586e39292b7eda7d29f3183a92e9469ba16a9\": container with ID starting with 1ebf5ef8e503da4de58a111a7a9586e39292b7eda7d29f3183a92e9469ba16a9 not found: ID does not exist" containerID="1ebf5ef8e503da4de58a111a7a9586e39292b7eda7d29f3183a92e9469ba16a9" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.476836 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1ebf5ef8e503da4de58a111a7a9586e39292b7eda7d29f3183a92e9469ba16a9"} err="failed to get container status \"1ebf5ef8e503da4de58a111a7a9586e39292b7eda7d29f3183a92e9469ba16a9\": rpc error: code = NotFound desc = could not find container \"1ebf5ef8e503da4de58a111a7a9586e39292b7eda7d29f3183a92e9469ba16a9\": container with ID starting with 1ebf5ef8e503da4de58a111a7a9586e39292b7eda7d29f3183a92e9469ba16a9 not found: ID does not exist" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.476848 4949 scope.go:117] "RemoveContainer" containerID="96d4428a669d625cd4c8d0f0b96bb5d6ad767855fd175562aa80a701be2bdf96" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.493742 4949 scope.go:117] "RemoveContainer" containerID="3bcc754528f93b5356acf57932d57f7e0c795695386d13148c9fe8e5198fc3ae" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.528967 4949 scope.go:117] "RemoveContainer" containerID="fa86e4c6f1ac09150b3aeb77b4d5cd2882f8a3eb6e4d1320ca5810b14d53f932" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.558894 4949 scope.go:117] "RemoveContainer" containerID="96d4428a669d625cd4c8d0f0b96bb5d6ad767855fd175562aa80a701be2bdf96" Feb 16 11:13:07 crc kubenswrapper[4949]: E0216 11:13:07.559572 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"96d4428a669d625cd4c8d0f0b96bb5d6ad767855fd175562aa80a701be2bdf96\": container with ID starting with 96d4428a669d625cd4c8d0f0b96bb5d6ad767855fd175562aa80a701be2bdf96 not found: ID does not exist" containerID="96d4428a669d625cd4c8d0f0b96bb5d6ad767855fd175562aa80a701be2bdf96" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.559655 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"96d4428a669d625cd4c8d0f0b96bb5d6ad767855fd175562aa80a701be2bdf96"} err="failed to get container status \"96d4428a669d625cd4c8d0f0b96bb5d6ad767855fd175562aa80a701be2bdf96\": rpc error: code = NotFound desc = could not find container \"96d4428a669d625cd4c8d0f0b96bb5d6ad767855fd175562aa80a701be2bdf96\": container with ID starting with 96d4428a669d625cd4c8d0f0b96bb5d6ad767855fd175562aa80a701be2bdf96 not found: ID does not exist" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.559716 4949 scope.go:117] "RemoveContainer" containerID="3bcc754528f93b5356acf57932d57f7e0c795695386d13148c9fe8e5198fc3ae" Feb 16 11:13:07 crc kubenswrapper[4949]: E0216 11:13:07.560289 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3bcc754528f93b5356acf57932d57f7e0c795695386d13148c9fe8e5198fc3ae\": container with ID starting with 3bcc754528f93b5356acf57932d57f7e0c795695386d13148c9fe8e5198fc3ae not found: ID does not exist" containerID="3bcc754528f93b5356acf57932d57f7e0c795695386d13148c9fe8e5198fc3ae" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.560362 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3bcc754528f93b5356acf57932d57f7e0c795695386d13148c9fe8e5198fc3ae"} err="failed to get container status 
\"3bcc754528f93b5356acf57932d57f7e0c795695386d13148c9fe8e5198fc3ae\": rpc error: code = NotFound desc = could not find container \"3bcc754528f93b5356acf57932d57f7e0c795695386d13148c9fe8e5198fc3ae\": container with ID starting with 3bcc754528f93b5356acf57932d57f7e0c795695386d13148c9fe8e5198fc3ae not found: ID does not exist" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.560410 4949 scope.go:117] "RemoveContainer" containerID="fa86e4c6f1ac09150b3aeb77b4d5cd2882f8a3eb6e4d1320ca5810b14d53f932" Feb 16 11:13:07 crc kubenswrapper[4949]: E0216 11:13:07.560934 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fa86e4c6f1ac09150b3aeb77b4d5cd2882f8a3eb6e4d1320ca5810b14d53f932\": container with ID starting with fa86e4c6f1ac09150b3aeb77b4d5cd2882f8a3eb6e4d1320ca5810b14d53f932 not found: ID does not exist" containerID="fa86e4c6f1ac09150b3aeb77b4d5cd2882f8a3eb6e4d1320ca5810b14d53f932" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.560975 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fa86e4c6f1ac09150b3aeb77b4d5cd2882f8a3eb6e4d1320ca5810b14d53f932"} err="failed to get container status \"fa86e4c6f1ac09150b3aeb77b4d5cd2882f8a3eb6e4d1320ca5810b14d53f932\": rpc error: code = NotFound desc = could not find container \"fa86e4c6f1ac09150b3aeb77b4d5cd2882f8a3eb6e4d1320ca5810b14d53f932\": container with ID starting with fa86e4c6f1ac09150b3aeb77b4d5cd2882f8a3eb6e4d1320ca5810b14d53f932 not found: ID does not exist" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.561016 4949 scope.go:117] "RemoveContainer" containerID="cb42da8c1eb59adbd159486be5a1eaf6b2f00c071c75f05e63da4f0401ba75bf" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.579009 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-khb7r" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.580921 4949 scope.go:117] "RemoveContainer" containerID="7e593655bc1e5915fe68d9d855cac645901bc32622c92087c8c32a206588f680" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.594111 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2jvc5"] Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.598072 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-2jvc5"] Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.645397 4949 scope.go:117] "RemoveContainer" containerID="4201e2f03bb1a0ccd08baea4a1384c0bfeec656318d2647f8d8427e86441155e" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.658509 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-fcdq5"] Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.661800 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/f50a54ec-5563-4d56-8639-86a6003e0b0e-marketplace-operator-metrics\") pod \"f50a54ec-5563-4d56-8639-86a6003e0b0e\" (UID: \"f50a54ec-5563-4d56-8639-86a6003e0b0e\") " Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.661904 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ctz5q\" (UniqueName: \"kubernetes.io/projected/f50a54ec-5563-4d56-8639-86a6003e0b0e-kube-api-access-ctz5q\") pod \"f50a54ec-5563-4d56-8639-86a6003e0b0e\" (UID: \"f50a54ec-5563-4d56-8639-86a6003e0b0e\") " Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.661938 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f50a54ec-5563-4d56-8639-86a6003e0b0e-marketplace-trusted-ca\") pod \"f50a54ec-5563-4d56-8639-86a6003e0b0e\" (UID: \"f50a54ec-5563-4d56-8639-86a6003e0b0e\") " Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.662008 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-fcdq5"] Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.663020 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f50a54ec-5563-4d56-8639-86a6003e0b0e-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "f50a54ec-5563-4d56-8639-86a6003e0b0e" (UID: "f50a54ec-5563-4d56-8639-86a6003e0b0e"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.665889 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f50a54ec-5563-4d56-8639-86a6003e0b0e-kube-api-access-ctz5q" (OuterVolumeSpecName: "kube-api-access-ctz5q") pod "f50a54ec-5563-4d56-8639-86a6003e0b0e" (UID: "f50a54ec-5563-4d56-8639-86a6003e0b0e"). InnerVolumeSpecName "kube-api-access-ctz5q". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.666208 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f50a54ec-5563-4d56-8639-86a6003e0b0e-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "f50a54ec-5563-4d56-8639-86a6003e0b0e" (UID: "f50a54ec-5563-4d56-8639-86a6003e0b0e"). 
InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.676459 4949 scope.go:117] "RemoveContainer" containerID="cb42da8c1eb59adbd159486be5a1eaf6b2f00c071c75f05e63da4f0401ba75bf" Feb 16 11:13:07 crc kubenswrapper[4949]: E0216 11:13:07.676984 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cb42da8c1eb59adbd159486be5a1eaf6b2f00c071c75f05e63da4f0401ba75bf\": container with ID starting with cb42da8c1eb59adbd159486be5a1eaf6b2f00c071c75f05e63da4f0401ba75bf not found: ID does not exist" containerID="cb42da8c1eb59adbd159486be5a1eaf6b2f00c071c75f05e63da4f0401ba75bf" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.677097 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cb42da8c1eb59adbd159486be5a1eaf6b2f00c071c75f05e63da4f0401ba75bf"} err="failed to get container status \"cb42da8c1eb59adbd159486be5a1eaf6b2f00c071c75f05e63da4f0401ba75bf\": rpc error: code = NotFound desc = could not find container \"cb42da8c1eb59adbd159486be5a1eaf6b2f00c071c75f05e63da4f0401ba75bf\": container with ID starting with cb42da8c1eb59adbd159486be5a1eaf6b2f00c071c75f05e63da4f0401ba75bf not found: ID does not exist" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.677253 4949 scope.go:117] "RemoveContainer" containerID="7e593655bc1e5915fe68d9d855cac645901bc32622c92087c8c32a206588f680" Feb 16 11:13:07 crc kubenswrapper[4949]: E0216 11:13:07.677700 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7e593655bc1e5915fe68d9d855cac645901bc32622c92087c8c32a206588f680\": container with ID starting with 7e593655bc1e5915fe68d9d855cac645901bc32622c92087c8c32a206588f680 not found: ID does not exist" containerID="7e593655bc1e5915fe68d9d855cac645901bc32622c92087c8c32a206588f680" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.677736 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7e593655bc1e5915fe68d9d855cac645901bc32622c92087c8c32a206588f680"} err="failed to get container status \"7e593655bc1e5915fe68d9d855cac645901bc32622c92087c8c32a206588f680\": rpc error: code = NotFound desc = could not find container \"7e593655bc1e5915fe68d9d855cac645901bc32622c92087c8c32a206588f680\": container with ID starting with 7e593655bc1e5915fe68d9d855cac645901bc32622c92087c8c32a206588f680 not found: ID does not exist" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.677769 4949 scope.go:117] "RemoveContainer" containerID="4201e2f03bb1a0ccd08baea4a1384c0bfeec656318d2647f8d8427e86441155e" Feb 16 11:13:07 crc kubenswrapper[4949]: E0216 11:13:07.678147 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4201e2f03bb1a0ccd08baea4a1384c0bfeec656318d2647f8d8427e86441155e\": container with ID starting with 4201e2f03bb1a0ccd08baea4a1384c0bfeec656318d2647f8d8427e86441155e not found: ID does not exist" containerID="4201e2f03bb1a0ccd08baea4a1384c0bfeec656318d2647f8d8427e86441155e" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.678223 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4201e2f03bb1a0ccd08baea4a1384c0bfeec656318d2647f8d8427e86441155e"} err="failed to get container status 
\"4201e2f03bb1a0ccd08baea4a1384c0bfeec656318d2647f8d8427e86441155e\": rpc error: code = NotFound desc = could not find container \"4201e2f03bb1a0ccd08baea4a1384c0bfeec656318d2647f8d8427e86441155e\": container with ID starting with 4201e2f03bb1a0ccd08baea4a1384c0bfeec656318d2647f8d8427e86441155e not found: ID does not exist" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.678239 4949 scope.go:117] "RemoveContainer" containerID="9c0bacc6281fd8a737b14024b78bfa4ca1732dc029e44a38f2cac8fa563bb387" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.696042 4949 scope.go:117] "RemoveContainer" containerID="a9445a9fec4778807c14bd524b80b1acb4aaf9881b32ee06deaa159769f1606a" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.714445 4949 scope.go:117] "RemoveContainer" containerID="29f0364b579646e95a62816d324031feb34ca7345cfc3a4c0ac07a0d99913a5e" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.731864 4949 scope.go:117] "RemoveContainer" containerID="9c0bacc6281fd8a737b14024b78bfa4ca1732dc029e44a38f2cac8fa563bb387" Feb 16 11:13:07 crc kubenswrapper[4949]: E0216 11:13:07.732421 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9c0bacc6281fd8a737b14024b78bfa4ca1732dc029e44a38f2cac8fa563bb387\": container with ID starting with 9c0bacc6281fd8a737b14024b78bfa4ca1732dc029e44a38f2cac8fa563bb387 not found: ID does not exist" containerID="9c0bacc6281fd8a737b14024b78bfa4ca1732dc029e44a38f2cac8fa563bb387" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.732458 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9c0bacc6281fd8a737b14024b78bfa4ca1732dc029e44a38f2cac8fa563bb387"} err="failed to get container status \"9c0bacc6281fd8a737b14024b78bfa4ca1732dc029e44a38f2cac8fa563bb387\": rpc error: code = NotFound desc = could not find container \"9c0bacc6281fd8a737b14024b78bfa4ca1732dc029e44a38f2cac8fa563bb387\": container with ID starting with 9c0bacc6281fd8a737b14024b78bfa4ca1732dc029e44a38f2cac8fa563bb387 not found: ID does not exist" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.732485 4949 scope.go:117] "RemoveContainer" containerID="a9445a9fec4778807c14bd524b80b1acb4aaf9881b32ee06deaa159769f1606a" Feb 16 11:13:07 crc kubenswrapper[4949]: E0216 11:13:07.732976 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a9445a9fec4778807c14bd524b80b1acb4aaf9881b32ee06deaa159769f1606a\": container with ID starting with a9445a9fec4778807c14bd524b80b1acb4aaf9881b32ee06deaa159769f1606a not found: ID does not exist" containerID="a9445a9fec4778807c14bd524b80b1acb4aaf9881b32ee06deaa159769f1606a" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.733001 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a9445a9fec4778807c14bd524b80b1acb4aaf9881b32ee06deaa159769f1606a"} err="failed to get container status \"a9445a9fec4778807c14bd524b80b1acb4aaf9881b32ee06deaa159769f1606a\": rpc error: code = NotFound desc = could not find container \"a9445a9fec4778807c14bd524b80b1acb4aaf9881b32ee06deaa159769f1606a\": container with ID starting with a9445a9fec4778807c14bd524b80b1acb4aaf9881b32ee06deaa159769f1606a not found: ID does not exist" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.733016 4949 scope.go:117] "RemoveContainer" containerID="29f0364b579646e95a62816d324031feb34ca7345cfc3a4c0ac07a0d99913a5e" Feb 16 11:13:07 crc 
kubenswrapper[4949]: E0216 11:13:07.733485 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"29f0364b579646e95a62816d324031feb34ca7345cfc3a4c0ac07a0d99913a5e\": container with ID starting with 29f0364b579646e95a62816d324031feb34ca7345cfc3a4c0ac07a0d99913a5e not found: ID does not exist" containerID="29f0364b579646e95a62816d324031feb34ca7345cfc3a4c0ac07a0d99913a5e" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.733505 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"29f0364b579646e95a62816d324031feb34ca7345cfc3a4c0ac07a0d99913a5e"} err="failed to get container status \"29f0364b579646e95a62816d324031feb34ca7345cfc3a4c0ac07a0d99913a5e\": rpc error: code = NotFound desc = could not find container \"29f0364b579646e95a62816d324031feb34ca7345cfc3a4c0ac07a0d99913a5e\": container with ID starting with 29f0364b579646e95a62816d324031feb34ca7345cfc3a4c0ac07a0d99913a5e not found: ID does not exist" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.763909 4949 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/f50a54ec-5563-4d56-8639-86a6003e0b0e-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.763946 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ctz5q\" (UniqueName: \"kubernetes.io/projected/f50a54ec-5563-4d56-8639-86a6003e0b0e-kube-api-access-ctz5q\") on node \"crc\" DevicePath \"\"" Feb 16 11:13:07 crc kubenswrapper[4949]: I0216 11:13:07.763961 4949 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f50a54ec-5563-4d56-8639-86a6003e0b0e-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Feb 16 11:13:08 crc kubenswrapper[4949]: I0216 11:13:08.285860 4949 generic.go:334] "Generic (PLEG): container finished" podID="f50a54ec-5563-4d56-8639-86a6003e0b0e" containerID="e5ec96a119aa938b436e6809845b345b1d757e4259f34bacf7d298a0f05313a0" exitCode=0 Feb 16 11:13:08 crc kubenswrapper[4949]: I0216 11:13:08.286012 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-khb7r" event={"ID":"f50a54ec-5563-4d56-8639-86a6003e0b0e","Type":"ContainerDied","Data":"e5ec96a119aa938b436e6809845b345b1d757e4259f34bacf7d298a0f05313a0"} Feb 16 11:13:08 crc kubenswrapper[4949]: I0216 11:13:08.286065 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-khb7r" event={"ID":"f50a54ec-5563-4d56-8639-86a6003e0b0e","Type":"ContainerDied","Data":"2ae1d2b20d0df819544681e0e945094e62079304da890f2c8d4e71669b06f0ba"} Feb 16 11:13:08 crc kubenswrapper[4949]: I0216 11:13:08.286094 4949 scope.go:117] "RemoveContainer" containerID="e5ec96a119aa938b436e6809845b345b1d757e4259f34bacf7d298a0f05313a0" Feb 16 11:13:08 crc kubenswrapper[4949]: I0216 11:13:08.286359 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-khb7r" Feb 16 11:13:08 crc kubenswrapper[4949]: I0216 11:13:08.292294 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-pp4fp" event={"ID":"80cb28ef-0fde-4224-84e3-df2e1ca5ffce","Type":"ContainerStarted","Data":"81a7955a3b6351b7db618142764dc4d99c1a4b31162c272a46c1f25530e1abba"} Feb 16 11:13:08 crc kubenswrapper[4949]: I0216 11:13:08.292362 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-pp4fp" event={"ID":"80cb28ef-0fde-4224-84e3-df2e1ca5ffce","Type":"ContainerStarted","Data":"f692c983d552f6fce4cd718d102e1d3b80d6d2cca2af0e9125f18651515cbd8a"} Feb 16 11:13:08 crc kubenswrapper[4949]: I0216 11:13:08.293635 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-pp4fp" Feb 16 11:13:08 crc kubenswrapper[4949]: I0216 11:13:08.299392 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-pp4fp" Feb 16 11:13:08 crc kubenswrapper[4949]: I0216 11:13:08.321834 4949 scope.go:117] "RemoveContainer" containerID="42785c54bf371ca60d66c60914237b6ed75f54d5445b7302272c911ffd6ee7c1" Feb 16 11:13:08 crc kubenswrapper[4949]: I0216 11:13:08.322464 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-pp4fp" podStartSLOduration=2.32243013 podStartE2EDuration="2.32243013s" podCreationTimestamp="2026-02-16 11:13:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:13:08.316216475 +0000 UTC m=+377.945550660" watchObservedRunningTime="2026-02-16 11:13:08.32243013 +0000 UTC m=+377.951764335" Feb 16 11:13:08 crc kubenswrapper[4949]: I0216 11:13:08.373272 4949 scope.go:117] "RemoveContainer" containerID="e5ec96a119aa938b436e6809845b345b1d757e4259f34bacf7d298a0f05313a0" Feb 16 11:13:08 crc kubenswrapper[4949]: E0216 11:13:08.373961 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e5ec96a119aa938b436e6809845b345b1d757e4259f34bacf7d298a0f05313a0\": container with ID starting with e5ec96a119aa938b436e6809845b345b1d757e4259f34bacf7d298a0f05313a0 not found: ID does not exist" containerID="e5ec96a119aa938b436e6809845b345b1d757e4259f34bacf7d298a0f05313a0" Feb 16 11:13:08 crc kubenswrapper[4949]: I0216 11:13:08.373999 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e5ec96a119aa938b436e6809845b345b1d757e4259f34bacf7d298a0f05313a0"} err="failed to get container status \"e5ec96a119aa938b436e6809845b345b1d757e4259f34bacf7d298a0f05313a0\": rpc error: code = NotFound desc = could not find container \"e5ec96a119aa938b436e6809845b345b1d757e4259f34bacf7d298a0f05313a0\": container with ID starting with e5ec96a119aa938b436e6809845b345b1d757e4259f34bacf7d298a0f05313a0 not found: ID does not exist" Feb 16 11:13:08 crc kubenswrapper[4949]: I0216 11:13:08.374026 4949 scope.go:117] "RemoveContainer" containerID="42785c54bf371ca60d66c60914237b6ed75f54d5445b7302272c911ffd6ee7c1" Feb 16 11:13:08 crc kubenswrapper[4949]: E0216 11:13:08.374435 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"42785c54bf371ca60d66c60914237b6ed75f54d5445b7302272c911ffd6ee7c1\": container with ID starting with 42785c54bf371ca60d66c60914237b6ed75f54d5445b7302272c911ffd6ee7c1 not found: ID does not exist" containerID="42785c54bf371ca60d66c60914237b6ed75f54d5445b7302272c911ffd6ee7c1" Feb 16 11:13:08 crc kubenswrapper[4949]: I0216 11:13:08.374557 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"42785c54bf371ca60d66c60914237b6ed75f54d5445b7302272c911ffd6ee7c1"} err="failed to get container status \"42785c54bf371ca60d66c60914237b6ed75f54d5445b7302272c911ffd6ee7c1\": rpc error: code = NotFound desc = could not find container \"42785c54bf371ca60d66c60914237b6ed75f54d5445b7302272c911ffd6ee7c1\": container with ID starting with 42785c54bf371ca60d66c60914237b6ed75f54d5445b7302272c911ffd6ee7c1 not found: ID does not exist" Feb 16 11:13:08 crc kubenswrapper[4949]: I0216 11:13:08.375028 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-khb7r"] Feb 16 11:13:08 crc kubenswrapper[4949]: I0216 11:13:08.378228 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-khb7r"] Feb 16 11:13:08 crc kubenswrapper[4949]: I0216 11:13:08.864029 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-kp26g"] Feb 16 11:13:08 crc kubenswrapper[4949]: E0216 11:13:08.865117 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f50a54ec-5563-4d56-8639-86a6003e0b0e" containerName="marketplace-operator" Feb 16 11:13:08 crc kubenswrapper[4949]: I0216 11:13:08.865138 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="f50a54ec-5563-4d56-8639-86a6003e0b0e" containerName="marketplace-operator" Feb 16 11:13:08 crc kubenswrapper[4949]: E0216 11:13:08.865153 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19e05ba4-d60e-479d-ae62-6853917d7537" containerName="extract-content" Feb 16 11:13:08 crc kubenswrapper[4949]: I0216 11:13:08.865161 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="19e05ba4-d60e-479d-ae62-6853917d7537" containerName="extract-content" Feb 16 11:13:08 crc kubenswrapper[4949]: E0216 11:13:08.865187 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f50a54ec-5563-4d56-8639-86a6003e0b0e" containerName="marketplace-operator" Feb 16 11:13:08 crc kubenswrapper[4949]: I0216 11:13:08.865197 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="f50a54ec-5563-4d56-8639-86a6003e0b0e" containerName="marketplace-operator" Feb 16 11:13:08 crc kubenswrapper[4949]: E0216 11:13:08.865207 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19e05ba4-d60e-479d-ae62-6853917d7537" containerName="registry-server" Feb 16 11:13:08 crc kubenswrapper[4949]: I0216 11:13:08.865215 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="19e05ba4-d60e-479d-ae62-6853917d7537" containerName="registry-server" Feb 16 11:13:08 crc kubenswrapper[4949]: E0216 11:13:08.865229 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2716c39-f511-47e9-a400-94cb1cd5ba42" containerName="registry-server" Feb 16 11:13:08 crc kubenswrapper[4949]: I0216 11:13:08.865235 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2716c39-f511-47e9-a400-94cb1cd5ba42" containerName="registry-server" Feb 16 11:13:08 crc kubenswrapper[4949]: E0216 11:13:08.865246 4949 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="37cad064-c760-43e0-8a5c-fb66fc774246" containerName="extract-content" Feb 16 11:13:08 crc kubenswrapper[4949]: I0216 11:13:08.865254 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="37cad064-c760-43e0-8a5c-fb66fc774246" containerName="extract-content" Feb 16 11:13:08 crc kubenswrapper[4949]: E0216 11:13:08.865261 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37cad064-c760-43e0-8a5c-fb66fc774246" containerName="extract-utilities" Feb 16 11:13:08 crc kubenswrapper[4949]: I0216 11:13:08.865268 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="37cad064-c760-43e0-8a5c-fb66fc774246" containerName="extract-utilities" Feb 16 11:13:08 crc kubenswrapper[4949]: E0216 11:13:08.865278 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73a35f90-b2ec-4518-b927-844e164a8531" containerName="extract-content" Feb 16 11:13:08 crc kubenswrapper[4949]: I0216 11:13:08.865286 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="73a35f90-b2ec-4518-b927-844e164a8531" containerName="extract-content" Feb 16 11:13:08 crc kubenswrapper[4949]: E0216 11:13:08.865295 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73a35f90-b2ec-4518-b927-844e164a8531" containerName="registry-server" Feb 16 11:13:08 crc kubenswrapper[4949]: I0216 11:13:08.865302 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="73a35f90-b2ec-4518-b927-844e164a8531" containerName="registry-server" Feb 16 11:13:08 crc kubenswrapper[4949]: E0216 11:13:08.865311 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19e05ba4-d60e-479d-ae62-6853917d7537" containerName="extract-utilities" Feb 16 11:13:08 crc kubenswrapper[4949]: I0216 11:13:08.865320 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="19e05ba4-d60e-479d-ae62-6853917d7537" containerName="extract-utilities" Feb 16 11:13:08 crc kubenswrapper[4949]: E0216 11:13:08.865331 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73a35f90-b2ec-4518-b927-844e164a8531" containerName="extract-utilities" Feb 16 11:13:08 crc kubenswrapper[4949]: I0216 11:13:08.865339 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="73a35f90-b2ec-4518-b927-844e164a8531" containerName="extract-utilities" Feb 16 11:13:08 crc kubenswrapper[4949]: E0216 11:13:08.865350 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37cad064-c760-43e0-8a5c-fb66fc774246" containerName="registry-server" Feb 16 11:13:08 crc kubenswrapper[4949]: I0216 11:13:08.865357 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="37cad064-c760-43e0-8a5c-fb66fc774246" containerName="registry-server" Feb 16 11:13:08 crc kubenswrapper[4949]: E0216 11:13:08.865367 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2716c39-f511-47e9-a400-94cb1cd5ba42" containerName="extract-utilities" Feb 16 11:13:08 crc kubenswrapper[4949]: I0216 11:13:08.865374 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2716c39-f511-47e9-a400-94cb1cd5ba42" containerName="extract-utilities" Feb 16 11:13:08 crc kubenswrapper[4949]: E0216 11:13:08.865388 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2716c39-f511-47e9-a400-94cb1cd5ba42" containerName="extract-content" Feb 16 11:13:08 crc kubenswrapper[4949]: I0216 11:13:08.865396 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2716c39-f511-47e9-a400-94cb1cd5ba42" containerName="extract-content" Feb 16 11:13:08 crc kubenswrapper[4949]: I0216 11:13:08.865521 4949 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="f50a54ec-5563-4d56-8639-86a6003e0b0e" containerName="marketplace-operator" Feb 16 11:13:08 crc kubenswrapper[4949]: I0216 11:13:08.865534 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2716c39-f511-47e9-a400-94cb1cd5ba42" containerName="registry-server" Feb 16 11:13:08 crc kubenswrapper[4949]: I0216 11:13:08.865552 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="73a35f90-b2ec-4518-b927-844e164a8531" containerName="registry-server" Feb 16 11:13:08 crc kubenswrapper[4949]: I0216 11:13:08.865563 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="19e05ba4-d60e-479d-ae62-6853917d7537" containerName="registry-server" Feb 16 11:13:08 crc kubenswrapper[4949]: I0216 11:13:08.865574 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="37cad064-c760-43e0-8a5c-fb66fc774246" containerName="registry-server" Feb 16 11:13:08 crc kubenswrapper[4949]: I0216 11:13:08.865785 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="f50a54ec-5563-4d56-8639-86a6003e0b0e" containerName="marketplace-operator" Feb 16 11:13:08 crc kubenswrapper[4949]: I0216 11:13:08.866960 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kp26g" Feb 16 11:13:08 crc kubenswrapper[4949]: I0216 11:13:08.870450 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Feb 16 11:13:08 crc kubenswrapper[4949]: I0216 11:13:08.876976 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-kp26g"] Feb 16 11:13:08 crc kubenswrapper[4949]: I0216 11:13:08.882831 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/78507519-366b-4876-8361-caa27244e918-utilities\") pod \"redhat-marketplace-kp26g\" (UID: \"78507519-366b-4876-8361-caa27244e918\") " pod="openshift-marketplace/redhat-marketplace-kp26g" Feb 16 11:13:08 crc kubenswrapper[4949]: I0216 11:13:08.882977 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/78507519-366b-4876-8361-caa27244e918-catalog-content\") pod \"redhat-marketplace-kp26g\" (UID: \"78507519-366b-4876-8361-caa27244e918\") " pod="openshift-marketplace/redhat-marketplace-kp26g" Feb 16 11:13:08 crc kubenswrapper[4949]: I0216 11:13:08.883139 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sqtpz\" (UniqueName: \"kubernetes.io/projected/78507519-366b-4876-8361-caa27244e918-kube-api-access-sqtpz\") pod \"redhat-marketplace-kp26g\" (UID: \"78507519-366b-4876-8361-caa27244e918\") " pod="openshift-marketplace/redhat-marketplace-kp26g" Feb 16 11:13:08 crc kubenswrapper[4949]: I0216 11:13:08.984750 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sqtpz\" (UniqueName: \"kubernetes.io/projected/78507519-366b-4876-8361-caa27244e918-kube-api-access-sqtpz\") pod \"redhat-marketplace-kp26g\" (UID: \"78507519-366b-4876-8361-caa27244e918\") " pod="openshift-marketplace/redhat-marketplace-kp26g" Feb 16 11:13:08 crc kubenswrapper[4949]: I0216 11:13:08.984881 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/78507519-366b-4876-8361-caa27244e918-utilities\") pod \"redhat-marketplace-kp26g\" (UID: \"78507519-366b-4876-8361-caa27244e918\") " pod="openshift-marketplace/redhat-marketplace-kp26g" Feb 16 11:13:08 crc kubenswrapper[4949]: I0216 11:13:08.984921 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/78507519-366b-4876-8361-caa27244e918-catalog-content\") pod \"redhat-marketplace-kp26g\" (UID: \"78507519-366b-4876-8361-caa27244e918\") " pod="openshift-marketplace/redhat-marketplace-kp26g" Feb 16 11:13:08 crc kubenswrapper[4949]: I0216 11:13:08.985613 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/78507519-366b-4876-8361-caa27244e918-utilities\") pod \"redhat-marketplace-kp26g\" (UID: \"78507519-366b-4876-8361-caa27244e918\") " pod="openshift-marketplace/redhat-marketplace-kp26g" Feb 16 11:13:08 crc kubenswrapper[4949]: I0216 11:13:08.985971 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/78507519-366b-4876-8361-caa27244e918-catalog-content\") pod \"redhat-marketplace-kp26g\" (UID: \"78507519-366b-4876-8361-caa27244e918\") " pod="openshift-marketplace/redhat-marketplace-kp26g" Feb 16 11:13:09 crc kubenswrapper[4949]: I0216 11:13:09.009451 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sqtpz\" (UniqueName: \"kubernetes.io/projected/78507519-366b-4876-8361-caa27244e918-kube-api-access-sqtpz\") pod \"redhat-marketplace-kp26g\" (UID: \"78507519-366b-4876-8361-caa27244e918\") " pod="openshift-marketplace/redhat-marketplace-kp26g" Feb 16 11:13:09 crc kubenswrapper[4949]: I0216 11:13:09.066693 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-s6hs5"] Feb 16 11:13:09 crc kubenswrapper[4949]: I0216 11:13:09.068300 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-s6hs5" Feb 16 11:13:09 crc kubenswrapper[4949]: I0216 11:13:09.072269 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Feb 16 11:13:09 crc kubenswrapper[4949]: I0216 11:13:09.083202 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-s6hs5"] Feb 16 11:13:09 crc kubenswrapper[4949]: I0216 11:13:09.086206 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/72ec1df3-0f00-466a-ae9c-77294ac6ed28-catalog-content\") pod \"certified-operators-s6hs5\" (UID: \"72ec1df3-0f00-466a-ae9c-77294ac6ed28\") " pod="openshift-marketplace/certified-operators-s6hs5" Feb 16 11:13:09 crc kubenswrapper[4949]: I0216 11:13:09.086253 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/72ec1df3-0f00-466a-ae9c-77294ac6ed28-utilities\") pod \"certified-operators-s6hs5\" (UID: \"72ec1df3-0f00-466a-ae9c-77294ac6ed28\") " pod="openshift-marketplace/certified-operators-s6hs5" Feb 16 11:13:09 crc kubenswrapper[4949]: I0216 11:13:09.086278 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sjlgj\" (UniqueName: \"kubernetes.io/projected/72ec1df3-0f00-466a-ae9c-77294ac6ed28-kube-api-access-sjlgj\") pod \"certified-operators-s6hs5\" (UID: \"72ec1df3-0f00-466a-ae9c-77294ac6ed28\") " pod="openshift-marketplace/certified-operators-s6hs5" Feb 16 11:13:09 crc kubenswrapper[4949]: I0216 11:13:09.187316 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/72ec1df3-0f00-466a-ae9c-77294ac6ed28-catalog-content\") pod \"certified-operators-s6hs5\" (UID: \"72ec1df3-0f00-466a-ae9c-77294ac6ed28\") " pod="openshift-marketplace/certified-operators-s6hs5" Feb 16 11:13:09 crc kubenswrapper[4949]: I0216 11:13:09.187378 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/72ec1df3-0f00-466a-ae9c-77294ac6ed28-utilities\") pod \"certified-operators-s6hs5\" (UID: \"72ec1df3-0f00-466a-ae9c-77294ac6ed28\") " pod="openshift-marketplace/certified-operators-s6hs5" Feb 16 11:13:09 crc kubenswrapper[4949]: I0216 11:13:09.187417 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sjlgj\" (UniqueName: \"kubernetes.io/projected/72ec1df3-0f00-466a-ae9c-77294ac6ed28-kube-api-access-sjlgj\") pod \"certified-operators-s6hs5\" (UID: \"72ec1df3-0f00-466a-ae9c-77294ac6ed28\") " pod="openshift-marketplace/certified-operators-s6hs5" Feb 16 11:13:09 crc kubenswrapper[4949]: I0216 11:13:09.187954 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/72ec1df3-0f00-466a-ae9c-77294ac6ed28-utilities\") pod \"certified-operators-s6hs5\" (UID: \"72ec1df3-0f00-466a-ae9c-77294ac6ed28\") " pod="openshift-marketplace/certified-operators-s6hs5" Feb 16 11:13:09 crc kubenswrapper[4949]: I0216 11:13:09.188044 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/72ec1df3-0f00-466a-ae9c-77294ac6ed28-catalog-content\") pod \"certified-operators-s6hs5\" (UID: 
\"72ec1df3-0f00-466a-ae9c-77294ac6ed28\") " pod="openshift-marketplace/certified-operators-s6hs5" Feb 16 11:13:09 crc kubenswrapper[4949]: I0216 11:13:09.208316 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sjlgj\" (UniqueName: \"kubernetes.io/projected/72ec1df3-0f00-466a-ae9c-77294ac6ed28-kube-api-access-sjlgj\") pod \"certified-operators-s6hs5\" (UID: \"72ec1df3-0f00-466a-ae9c-77294ac6ed28\") " pod="openshift-marketplace/certified-operators-s6hs5" Feb 16 11:13:09 crc kubenswrapper[4949]: I0216 11:13:09.229386 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kp26g" Feb 16 11:13:09 crc kubenswrapper[4949]: I0216 11:13:09.249541 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="19e05ba4-d60e-479d-ae62-6853917d7537" path="/var/lib/kubelet/pods/19e05ba4-d60e-479d-ae62-6853917d7537/volumes" Feb 16 11:13:09 crc kubenswrapper[4949]: I0216 11:13:09.250390 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="37cad064-c760-43e0-8a5c-fb66fc774246" path="/var/lib/kubelet/pods/37cad064-c760-43e0-8a5c-fb66fc774246/volumes" Feb 16 11:13:09 crc kubenswrapper[4949]: I0216 11:13:09.250998 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="73a35f90-b2ec-4518-b927-844e164a8531" path="/var/lib/kubelet/pods/73a35f90-b2ec-4518-b927-844e164a8531/volumes" Feb 16 11:13:09 crc kubenswrapper[4949]: I0216 11:13:09.252060 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c2716c39-f511-47e9-a400-94cb1cd5ba42" path="/var/lib/kubelet/pods/c2716c39-f511-47e9-a400-94cb1cd5ba42/volumes" Feb 16 11:13:09 crc kubenswrapper[4949]: I0216 11:13:09.253341 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f50a54ec-5563-4d56-8639-86a6003e0b0e" path="/var/lib/kubelet/pods/f50a54ec-5563-4d56-8639-86a6003e0b0e/volumes" Feb 16 11:13:09 crc kubenswrapper[4949]: I0216 11:13:09.394269 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-s6hs5" Feb 16 11:13:09 crc kubenswrapper[4949]: I0216 11:13:09.457152 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-kp26g"] Feb 16 11:13:09 crc kubenswrapper[4949]: W0216 11:13:09.467040 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod78507519_366b_4876_8361_caa27244e918.slice/crio-baf4cc78995c5656cbd1d6aa96207d75a50e62068cf941eb27ecf43f795a7819 WatchSource:0}: Error finding container baf4cc78995c5656cbd1d6aa96207d75a50e62068cf941eb27ecf43f795a7819: Status 404 returned error can't find the container with id baf4cc78995c5656cbd1d6aa96207d75a50e62068cf941eb27ecf43f795a7819 Feb 16 11:13:09 crc kubenswrapper[4949]: I0216 11:13:09.593979 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-s6hs5"] Feb 16 11:13:09 crc kubenswrapper[4949]: W0216 11:13:09.601904 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod72ec1df3_0f00_466a_ae9c_77294ac6ed28.slice/crio-a5af1257188a70cbea7d1b872e219ec9b958f8755dc21471a8d5a3325b12e537 WatchSource:0}: Error finding container a5af1257188a70cbea7d1b872e219ec9b958f8755dc21471a8d5a3325b12e537: Status 404 returned error can't find the container with id a5af1257188a70cbea7d1b872e219ec9b958f8755dc21471a8d5a3325b12e537 Feb 16 11:13:09 crc kubenswrapper[4949]: I0216 11:13:09.744043 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-ldkbl"] Feb 16 11:13:09 crc kubenswrapper[4949]: I0216 11:13:09.745035 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-ldkbl" Feb 16 11:13:09 crc kubenswrapper[4949]: I0216 11:13:09.764113 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-ldkbl"] Feb 16 11:13:09 crc kubenswrapper[4949]: I0216 11:13:09.897886 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/46966051-75f5-49a1-9a9f-0b2afd382a43-installation-pull-secrets\") pod \"image-registry-66df7c8f76-ldkbl\" (UID: \"46966051-75f5-49a1-9a9f-0b2afd382a43\") " pod="openshift-image-registry/image-registry-66df7c8f76-ldkbl" Feb 16 11:13:09 crc kubenswrapper[4949]: I0216 11:13:09.898266 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/46966051-75f5-49a1-9a9f-0b2afd382a43-ca-trust-extracted\") pod \"image-registry-66df7c8f76-ldkbl\" (UID: \"46966051-75f5-49a1-9a9f-0b2afd382a43\") " pod="openshift-image-registry/image-registry-66df7c8f76-ldkbl" Feb 16 11:13:09 crc kubenswrapper[4949]: I0216 11:13:09.898307 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vvqlm\" (UniqueName: \"kubernetes.io/projected/46966051-75f5-49a1-9a9f-0b2afd382a43-kube-api-access-vvqlm\") pod \"image-registry-66df7c8f76-ldkbl\" (UID: \"46966051-75f5-49a1-9a9f-0b2afd382a43\") " pod="openshift-image-registry/image-registry-66df7c8f76-ldkbl" Feb 16 11:13:09 crc kubenswrapper[4949]: I0216 11:13:09.898335 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/46966051-75f5-49a1-9a9f-0b2afd382a43-trusted-ca\") pod \"image-registry-66df7c8f76-ldkbl\" (UID: \"46966051-75f5-49a1-9a9f-0b2afd382a43\") " pod="openshift-image-registry/image-registry-66df7c8f76-ldkbl" Feb 16 11:13:09 crc kubenswrapper[4949]: I0216 11:13:09.898363 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/46966051-75f5-49a1-9a9f-0b2afd382a43-bound-sa-token\") pod \"image-registry-66df7c8f76-ldkbl\" (UID: \"46966051-75f5-49a1-9a9f-0b2afd382a43\") " pod="openshift-image-registry/image-registry-66df7c8f76-ldkbl" Feb 16 11:13:09 crc kubenswrapper[4949]: I0216 11:13:09.898589 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-ldkbl\" (UID: \"46966051-75f5-49a1-9a9f-0b2afd382a43\") " pod="openshift-image-registry/image-registry-66df7c8f76-ldkbl" Feb 16 11:13:09 crc kubenswrapper[4949]: I0216 11:13:09.898654 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/46966051-75f5-49a1-9a9f-0b2afd382a43-registry-tls\") pod \"image-registry-66df7c8f76-ldkbl\" (UID: \"46966051-75f5-49a1-9a9f-0b2afd382a43\") " pod="openshift-image-registry/image-registry-66df7c8f76-ldkbl" Feb 16 11:13:09 crc kubenswrapper[4949]: I0216 11:13:09.898686 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: 
\"kubernetes.io/configmap/46966051-75f5-49a1-9a9f-0b2afd382a43-registry-certificates\") pod \"image-registry-66df7c8f76-ldkbl\" (UID: \"46966051-75f5-49a1-9a9f-0b2afd382a43\") " pod="openshift-image-registry/image-registry-66df7c8f76-ldkbl" Feb 16 11:13:09 crc kubenswrapper[4949]: I0216 11:13:09.927317 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-ldkbl\" (UID: \"46966051-75f5-49a1-9a9f-0b2afd382a43\") " pod="openshift-image-registry/image-registry-66df7c8f76-ldkbl" Feb 16 11:13:10 crc kubenswrapper[4949]: I0216 11:13:10.000810 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/46966051-75f5-49a1-9a9f-0b2afd382a43-installation-pull-secrets\") pod \"image-registry-66df7c8f76-ldkbl\" (UID: \"46966051-75f5-49a1-9a9f-0b2afd382a43\") " pod="openshift-image-registry/image-registry-66df7c8f76-ldkbl" Feb 16 11:13:10 crc kubenswrapper[4949]: I0216 11:13:10.000924 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/46966051-75f5-49a1-9a9f-0b2afd382a43-ca-trust-extracted\") pod \"image-registry-66df7c8f76-ldkbl\" (UID: \"46966051-75f5-49a1-9a9f-0b2afd382a43\") " pod="openshift-image-registry/image-registry-66df7c8f76-ldkbl" Feb 16 11:13:10 crc kubenswrapper[4949]: I0216 11:13:10.000980 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vvqlm\" (UniqueName: \"kubernetes.io/projected/46966051-75f5-49a1-9a9f-0b2afd382a43-kube-api-access-vvqlm\") pod \"image-registry-66df7c8f76-ldkbl\" (UID: \"46966051-75f5-49a1-9a9f-0b2afd382a43\") " pod="openshift-image-registry/image-registry-66df7c8f76-ldkbl" Feb 16 11:13:10 crc kubenswrapper[4949]: I0216 11:13:10.001009 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/46966051-75f5-49a1-9a9f-0b2afd382a43-trusted-ca\") pod \"image-registry-66df7c8f76-ldkbl\" (UID: \"46966051-75f5-49a1-9a9f-0b2afd382a43\") " pod="openshift-image-registry/image-registry-66df7c8f76-ldkbl" Feb 16 11:13:10 crc kubenswrapper[4949]: I0216 11:13:10.001047 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/46966051-75f5-49a1-9a9f-0b2afd382a43-bound-sa-token\") pod \"image-registry-66df7c8f76-ldkbl\" (UID: \"46966051-75f5-49a1-9a9f-0b2afd382a43\") " pod="openshift-image-registry/image-registry-66df7c8f76-ldkbl" Feb 16 11:13:10 crc kubenswrapper[4949]: I0216 11:13:10.001093 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/46966051-75f5-49a1-9a9f-0b2afd382a43-registry-tls\") pod \"image-registry-66df7c8f76-ldkbl\" (UID: \"46966051-75f5-49a1-9a9f-0b2afd382a43\") " pod="openshift-image-registry/image-registry-66df7c8f76-ldkbl" Feb 16 11:13:10 crc kubenswrapper[4949]: I0216 11:13:10.001116 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/46966051-75f5-49a1-9a9f-0b2afd382a43-registry-certificates\") pod \"image-registry-66df7c8f76-ldkbl\" (UID: \"46966051-75f5-49a1-9a9f-0b2afd382a43\") " 
pod="openshift-image-registry/image-registry-66df7c8f76-ldkbl" Feb 16 11:13:10 crc kubenswrapper[4949]: I0216 11:13:10.002196 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/46966051-75f5-49a1-9a9f-0b2afd382a43-ca-trust-extracted\") pod \"image-registry-66df7c8f76-ldkbl\" (UID: \"46966051-75f5-49a1-9a9f-0b2afd382a43\") " pod="openshift-image-registry/image-registry-66df7c8f76-ldkbl" Feb 16 11:13:10 crc kubenswrapper[4949]: I0216 11:13:10.004849 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/46966051-75f5-49a1-9a9f-0b2afd382a43-trusted-ca\") pod \"image-registry-66df7c8f76-ldkbl\" (UID: \"46966051-75f5-49a1-9a9f-0b2afd382a43\") " pod="openshift-image-registry/image-registry-66df7c8f76-ldkbl" Feb 16 11:13:10 crc kubenswrapper[4949]: I0216 11:13:10.005860 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/46966051-75f5-49a1-9a9f-0b2afd382a43-registry-certificates\") pod \"image-registry-66df7c8f76-ldkbl\" (UID: \"46966051-75f5-49a1-9a9f-0b2afd382a43\") " pod="openshift-image-registry/image-registry-66df7c8f76-ldkbl" Feb 16 11:13:10 crc kubenswrapper[4949]: I0216 11:13:10.007878 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/46966051-75f5-49a1-9a9f-0b2afd382a43-installation-pull-secrets\") pod \"image-registry-66df7c8f76-ldkbl\" (UID: \"46966051-75f5-49a1-9a9f-0b2afd382a43\") " pod="openshift-image-registry/image-registry-66df7c8f76-ldkbl" Feb 16 11:13:10 crc kubenswrapper[4949]: I0216 11:13:10.007899 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/46966051-75f5-49a1-9a9f-0b2afd382a43-registry-tls\") pod \"image-registry-66df7c8f76-ldkbl\" (UID: \"46966051-75f5-49a1-9a9f-0b2afd382a43\") " pod="openshift-image-registry/image-registry-66df7c8f76-ldkbl" Feb 16 11:13:10 crc kubenswrapper[4949]: I0216 11:13:10.019203 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/46966051-75f5-49a1-9a9f-0b2afd382a43-bound-sa-token\") pod \"image-registry-66df7c8f76-ldkbl\" (UID: \"46966051-75f5-49a1-9a9f-0b2afd382a43\") " pod="openshift-image-registry/image-registry-66df7c8f76-ldkbl" Feb 16 11:13:10 crc kubenswrapper[4949]: I0216 11:13:10.022729 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vvqlm\" (UniqueName: \"kubernetes.io/projected/46966051-75f5-49a1-9a9f-0b2afd382a43-kube-api-access-vvqlm\") pod \"image-registry-66df7c8f76-ldkbl\" (UID: \"46966051-75f5-49a1-9a9f-0b2afd382a43\") " pod="openshift-image-registry/image-registry-66df7c8f76-ldkbl" Feb 16 11:13:10 crc kubenswrapper[4949]: I0216 11:13:10.094230 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-ldkbl" Feb 16 11:13:10 crc kubenswrapper[4949]: I0216 11:13:10.310356 4949 generic.go:334] "Generic (PLEG): container finished" podID="72ec1df3-0f00-466a-ae9c-77294ac6ed28" containerID="6c7e276cd8de52eb39ecd3413c158918ff7ceab3cc383edeb67ace6da4b5f991" exitCode=0 Feb 16 11:13:10 crc kubenswrapper[4949]: I0216 11:13:10.310419 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s6hs5" event={"ID":"72ec1df3-0f00-466a-ae9c-77294ac6ed28","Type":"ContainerDied","Data":"6c7e276cd8de52eb39ecd3413c158918ff7ceab3cc383edeb67ace6da4b5f991"} Feb 16 11:13:10 crc kubenswrapper[4949]: I0216 11:13:10.310687 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s6hs5" event={"ID":"72ec1df3-0f00-466a-ae9c-77294ac6ed28","Type":"ContainerStarted","Data":"a5af1257188a70cbea7d1b872e219ec9b958f8755dc21471a8d5a3325b12e537"} Feb 16 11:13:10 crc kubenswrapper[4949]: I0216 11:13:10.312216 4949 generic.go:334] "Generic (PLEG): container finished" podID="78507519-366b-4876-8361-caa27244e918" containerID="e0088a66ff544026ee795382c5dfb22b1ba407d3bce33b71f83fa00d1e4b9c69" exitCode=0 Feb 16 11:13:10 crc kubenswrapper[4949]: I0216 11:13:10.312344 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kp26g" event={"ID":"78507519-366b-4876-8361-caa27244e918","Type":"ContainerDied","Data":"e0088a66ff544026ee795382c5dfb22b1ba407d3bce33b71f83fa00d1e4b9c69"} Feb 16 11:13:10 crc kubenswrapper[4949]: I0216 11:13:10.312386 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kp26g" event={"ID":"78507519-366b-4876-8361-caa27244e918","Type":"ContainerStarted","Data":"baf4cc78995c5656cbd1d6aa96207d75a50e62068cf941eb27ecf43f795a7819"} Feb 16 11:13:10 crc kubenswrapper[4949]: I0216 11:13:10.501690 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-ldkbl"] Feb 16 11:13:11 crc kubenswrapper[4949]: I0216 11:13:11.262453 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-8lxg9"] Feb 16 11:13:11 crc kubenswrapper[4949]: I0216 11:13:11.263518 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-8lxg9" Feb 16 11:13:11 crc kubenswrapper[4949]: I0216 11:13:11.268679 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Feb 16 11:13:11 crc kubenswrapper[4949]: I0216 11:13:11.275098 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-8lxg9"] Feb 16 11:13:11 crc kubenswrapper[4949]: I0216 11:13:11.321568 4949 generic.go:334] "Generic (PLEG): container finished" podID="78507519-366b-4876-8361-caa27244e918" containerID="f3a7e1bccd77e9329094a2ab3a71d1ce095d09efe0eb7b3ef577ff276df5455c" exitCode=0 Feb 16 11:13:11 crc kubenswrapper[4949]: I0216 11:13:11.321606 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kp26g" event={"ID":"78507519-366b-4876-8361-caa27244e918","Type":"ContainerDied","Data":"f3a7e1bccd77e9329094a2ab3a71d1ce095d09efe0eb7b3ef577ff276df5455c"} Feb 16 11:13:11 crc kubenswrapper[4949]: I0216 11:13:11.323944 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-ldkbl" event={"ID":"46966051-75f5-49a1-9a9f-0b2afd382a43","Type":"ContainerStarted","Data":"3c622312c07d45dad2436f9a545c4fb05361e21a985329f4be5439ad4a9df598"} Feb 16 11:13:11 crc kubenswrapper[4949]: I0216 11:13:11.324028 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-ldkbl" Feb 16 11:13:11 crc kubenswrapper[4949]: I0216 11:13:11.324047 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-ldkbl" event={"ID":"46966051-75f5-49a1-9a9f-0b2afd382a43","Type":"ContainerStarted","Data":"990d62b3cf60b44541a688e0ad2ce84087e7de4ff0e98fdd9787a6f215ba8160"} Feb 16 11:13:11 crc kubenswrapper[4949]: I0216 11:13:11.325690 4949 generic.go:334] "Generic (PLEG): container finished" podID="72ec1df3-0f00-466a-ae9c-77294ac6ed28" containerID="98fe1c668db895e457cabe5f617d8d3d776dc8b5788ffdeb5ca83e000edd2793" exitCode=0 Feb 16 11:13:11 crc kubenswrapper[4949]: I0216 11:13:11.325720 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s6hs5" event={"ID":"72ec1df3-0f00-466a-ae9c-77294ac6ed28","Type":"ContainerDied","Data":"98fe1c668db895e457cabe5f617d8d3d776dc8b5788ffdeb5ca83e000edd2793"} Feb 16 11:13:11 crc kubenswrapper[4949]: I0216 11:13:11.367391 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-ldkbl" podStartSLOduration=2.367371092 podStartE2EDuration="2.367371092s" podCreationTimestamp="2026-02-16 11:13:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:13:11.363374232 +0000 UTC m=+380.992708397" watchObservedRunningTime="2026-02-16 11:13:11.367371092 +0000 UTC m=+380.996705257" Feb 16 11:13:11 crc kubenswrapper[4949]: I0216 11:13:11.421074 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/daf37823-eac2-462d-82ff-b54b8d3aaccd-utilities\") pod \"redhat-operators-8lxg9\" (UID: \"daf37823-eac2-462d-82ff-b54b8d3aaccd\") " pod="openshift-marketplace/redhat-operators-8lxg9" Feb 16 11:13:11 crc kubenswrapper[4949]: I0216 11:13:11.421208 4949 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jb84c\" (UniqueName: \"kubernetes.io/projected/daf37823-eac2-462d-82ff-b54b8d3aaccd-kube-api-access-jb84c\") pod \"redhat-operators-8lxg9\" (UID: \"daf37823-eac2-462d-82ff-b54b8d3aaccd\") " pod="openshift-marketplace/redhat-operators-8lxg9" Feb 16 11:13:11 crc kubenswrapper[4949]: I0216 11:13:11.421235 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/daf37823-eac2-462d-82ff-b54b8d3aaccd-catalog-content\") pod \"redhat-operators-8lxg9\" (UID: \"daf37823-eac2-462d-82ff-b54b8d3aaccd\") " pod="openshift-marketplace/redhat-operators-8lxg9" Feb 16 11:13:11 crc kubenswrapper[4949]: I0216 11:13:11.461883 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-2ckdb"] Feb 16 11:13:11 crc kubenswrapper[4949]: I0216 11:13:11.463232 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2ckdb" Feb 16 11:13:11 crc kubenswrapper[4949]: I0216 11:13:11.469321 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Feb 16 11:13:11 crc kubenswrapper[4949]: I0216 11:13:11.490647 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2ckdb"] Feb 16 11:13:11 crc kubenswrapper[4949]: I0216 11:13:11.522395 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/daf37823-eac2-462d-82ff-b54b8d3aaccd-utilities\") pod \"redhat-operators-8lxg9\" (UID: \"daf37823-eac2-462d-82ff-b54b8d3aaccd\") " pod="openshift-marketplace/redhat-operators-8lxg9" Feb 16 11:13:11 crc kubenswrapper[4949]: I0216 11:13:11.522501 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jb84c\" (UniqueName: \"kubernetes.io/projected/daf37823-eac2-462d-82ff-b54b8d3aaccd-kube-api-access-jb84c\") pod \"redhat-operators-8lxg9\" (UID: \"daf37823-eac2-462d-82ff-b54b8d3aaccd\") " pod="openshift-marketplace/redhat-operators-8lxg9" Feb 16 11:13:11 crc kubenswrapper[4949]: I0216 11:13:11.522543 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/daf37823-eac2-462d-82ff-b54b8d3aaccd-catalog-content\") pod \"redhat-operators-8lxg9\" (UID: \"daf37823-eac2-462d-82ff-b54b8d3aaccd\") " pod="openshift-marketplace/redhat-operators-8lxg9" Feb 16 11:13:11 crc kubenswrapper[4949]: I0216 11:13:11.522928 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/daf37823-eac2-462d-82ff-b54b8d3aaccd-utilities\") pod \"redhat-operators-8lxg9\" (UID: \"daf37823-eac2-462d-82ff-b54b8d3aaccd\") " pod="openshift-marketplace/redhat-operators-8lxg9" Feb 16 11:13:11 crc kubenswrapper[4949]: I0216 11:13:11.523471 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/daf37823-eac2-462d-82ff-b54b8d3aaccd-catalog-content\") pod \"redhat-operators-8lxg9\" (UID: \"daf37823-eac2-462d-82ff-b54b8d3aaccd\") " pod="openshift-marketplace/redhat-operators-8lxg9" Feb 16 11:13:11 crc kubenswrapper[4949]: I0216 11:13:11.554310 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jb84c\" 
(UniqueName: \"kubernetes.io/projected/daf37823-eac2-462d-82ff-b54b8d3aaccd-kube-api-access-jb84c\") pod \"redhat-operators-8lxg9\" (UID: \"daf37823-eac2-462d-82ff-b54b8d3aaccd\") " pod="openshift-marketplace/redhat-operators-8lxg9" Feb 16 11:13:11 crc kubenswrapper[4949]: I0216 11:13:11.618254 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8lxg9" Feb 16 11:13:11 crc kubenswrapper[4949]: I0216 11:13:11.623677 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5tgf6\" (UniqueName: \"kubernetes.io/projected/c3335bb3-fadb-4f0f-bf0f-5632510a5a06-kube-api-access-5tgf6\") pod \"community-operators-2ckdb\" (UID: \"c3335bb3-fadb-4f0f-bf0f-5632510a5a06\") " pod="openshift-marketplace/community-operators-2ckdb" Feb 16 11:13:11 crc kubenswrapper[4949]: I0216 11:13:11.623733 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c3335bb3-fadb-4f0f-bf0f-5632510a5a06-catalog-content\") pod \"community-operators-2ckdb\" (UID: \"c3335bb3-fadb-4f0f-bf0f-5632510a5a06\") " pod="openshift-marketplace/community-operators-2ckdb" Feb 16 11:13:11 crc kubenswrapper[4949]: I0216 11:13:11.623773 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c3335bb3-fadb-4f0f-bf0f-5632510a5a06-utilities\") pod \"community-operators-2ckdb\" (UID: \"c3335bb3-fadb-4f0f-bf0f-5632510a5a06\") " pod="openshift-marketplace/community-operators-2ckdb" Feb 16 11:13:11 crc kubenswrapper[4949]: I0216 11:13:11.725118 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c3335bb3-fadb-4f0f-bf0f-5632510a5a06-catalog-content\") pod \"community-operators-2ckdb\" (UID: \"c3335bb3-fadb-4f0f-bf0f-5632510a5a06\") " pod="openshift-marketplace/community-operators-2ckdb" Feb 16 11:13:11 crc kubenswrapper[4949]: I0216 11:13:11.725583 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c3335bb3-fadb-4f0f-bf0f-5632510a5a06-utilities\") pod \"community-operators-2ckdb\" (UID: \"c3335bb3-fadb-4f0f-bf0f-5632510a5a06\") " pod="openshift-marketplace/community-operators-2ckdb" Feb 16 11:13:11 crc kubenswrapper[4949]: I0216 11:13:11.725671 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5tgf6\" (UniqueName: \"kubernetes.io/projected/c3335bb3-fadb-4f0f-bf0f-5632510a5a06-kube-api-access-5tgf6\") pod \"community-operators-2ckdb\" (UID: \"c3335bb3-fadb-4f0f-bf0f-5632510a5a06\") " pod="openshift-marketplace/community-operators-2ckdb" Feb 16 11:13:11 crc kubenswrapper[4949]: I0216 11:13:11.725897 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c3335bb3-fadb-4f0f-bf0f-5632510a5a06-catalog-content\") pod \"community-operators-2ckdb\" (UID: \"c3335bb3-fadb-4f0f-bf0f-5632510a5a06\") " pod="openshift-marketplace/community-operators-2ckdb" Feb 16 11:13:11 crc kubenswrapper[4949]: I0216 11:13:11.726379 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c3335bb3-fadb-4f0f-bf0f-5632510a5a06-utilities\") pod \"community-operators-2ckdb\" (UID: 
\"c3335bb3-fadb-4f0f-bf0f-5632510a5a06\") " pod="openshift-marketplace/community-operators-2ckdb" Feb 16 11:13:11 crc kubenswrapper[4949]: I0216 11:13:11.752833 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5tgf6\" (UniqueName: \"kubernetes.io/projected/c3335bb3-fadb-4f0f-bf0f-5632510a5a06-kube-api-access-5tgf6\") pod \"community-operators-2ckdb\" (UID: \"c3335bb3-fadb-4f0f-bf0f-5632510a5a06\") " pod="openshift-marketplace/community-operators-2ckdb" Feb 16 11:13:11 crc kubenswrapper[4949]: I0216 11:13:11.779360 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2ckdb" Feb 16 11:13:11 crc kubenswrapper[4949]: I0216 11:13:11.869044 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-8lxg9"] Feb 16 11:13:12 crc kubenswrapper[4949]: I0216 11:13:12.022526 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2ckdb"] Feb 16 11:13:12 crc kubenswrapper[4949]: W0216 11:13:12.024069 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc3335bb3_fadb_4f0f_bf0f_5632510a5a06.slice/crio-430dd92d03436f6b7c89fcaa123aa3b20aa389fca918ae7167c6a833a66ad0b5 WatchSource:0}: Error finding container 430dd92d03436f6b7c89fcaa123aa3b20aa389fca918ae7167c6a833a66ad0b5: Status 404 returned error can't find the container with id 430dd92d03436f6b7c89fcaa123aa3b20aa389fca918ae7167c6a833a66ad0b5 Feb 16 11:13:12 crc kubenswrapper[4949]: I0216 11:13:12.341411 4949 generic.go:334] "Generic (PLEG): container finished" podID="daf37823-eac2-462d-82ff-b54b8d3aaccd" containerID="0afbad83a162438facd46008151f96f765c86d433787defd9f8a9f95a68a719f" exitCode=0 Feb 16 11:13:12 crc kubenswrapper[4949]: I0216 11:13:12.341753 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8lxg9" event={"ID":"daf37823-eac2-462d-82ff-b54b8d3aaccd","Type":"ContainerDied","Data":"0afbad83a162438facd46008151f96f765c86d433787defd9f8a9f95a68a719f"} Feb 16 11:13:12 crc kubenswrapper[4949]: I0216 11:13:12.341877 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8lxg9" event={"ID":"daf37823-eac2-462d-82ff-b54b8d3aaccd","Type":"ContainerStarted","Data":"8d8f8b219a4f8020cb5ed7658d31dcf2b93adbbac80beb4f257448422f93399a"} Feb 16 11:13:12 crc kubenswrapper[4949]: I0216 11:13:12.348730 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kp26g" event={"ID":"78507519-366b-4876-8361-caa27244e918","Type":"ContainerStarted","Data":"715ff885cbac6cfc61da761247865ce9e0ad99d757add587cacc6544c0275205"} Feb 16 11:13:12 crc kubenswrapper[4949]: I0216 11:13:12.360978 4949 generic.go:334] "Generic (PLEG): container finished" podID="c3335bb3-fadb-4f0f-bf0f-5632510a5a06" containerID="f4a9c4064aea35616ab59d53375787bf58c386032092a397b42ebc9c34b1bbe3" exitCode=0 Feb 16 11:13:12 crc kubenswrapper[4949]: I0216 11:13:12.361105 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2ckdb" event={"ID":"c3335bb3-fadb-4f0f-bf0f-5632510a5a06","Type":"ContainerDied","Data":"f4a9c4064aea35616ab59d53375787bf58c386032092a397b42ebc9c34b1bbe3"} Feb 16 11:13:12 crc kubenswrapper[4949]: I0216 11:13:12.361196 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2ckdb" 
event={"ID":"c3335bb3-fadb-4f0f-bf0f-5632510a5a06","Type":"ContainerStarted","Data":"430dd92d03436f6b7c89fcaa123aa3b20aa389fca918ae7167c6a833a66ad0b5"} Feb 16 11:13:12 crc kubenswrapper[4949]: I0216 11:13:12.418481 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-kp26g" podStartSLOduration=2.97051471 podStartE2EDuration="4.418452034s" podCreationTimestamp="2026-02-16 11:13:08 +0000 UTC" firstStartedPulling="2026-02-16 11:13:10.315522681 +0000 UTC m=+379.944856846" lastFinishedPulling="2026-02-16 11:13:11.763460005 +0000 UTC m=+381.392794170" observedRunningTime="2026-02-16 11:13:12.418215708 +0000 UTC m=+382.047549873" watchObservedRunningTime="2026-02-16 11:13:12.418452034 +0000 UTC m=+382.047786199" Feb 16 11:13:13 crc kubenswrapper[4949]: I0216 11:13:13.368774 4949 generic.go:334] "Generic (PLEG): container finished" podID="c3335bb3-fadb-4f0f-bf0f-5632510a5a06" containerID="0c0d3c7295703ad1261134940fdc6012c286e2428736aca6ce01c80181262755" exitCode=0 Feb 16 11:13:13 crc kubenswrapper[4949]: I0216 11:13:13.368849 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2ckdb" event={"ID":"c3335bb3-fadb-4f0f-bf0f-5632510a5a06","Type":"ContainerDied","Data":"0c0d3c7295703ad1261134940fdc6012c286e2428736aca6ce01c80181262755"} Feb 16 11:13:13 crc kubenswrapper[4949]: I0216 11:13:13.371616 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s6hs5" event={"ID":"72ec1df3-0f00-466a-ae9c-77294ac6ed28","Type":"ContainerStarted","Data":"8ad70fa9bba9f2166bd908327c6da7e0686621a1e0e293153ec7d6373e87f5dd"} Feb 16 11:13:13 crc kubenswrapper[4949]: I0216 11:13:13.377906 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8lxg9" event={"ID":"daf37823-eac2-462d-82ff-b54b8d3aaccd","Type":"ContainerStarted","Data":"8de06f6ecb36286dd66e3bd0af17ab6d7f4275af35f947bd9afa8a67629cd495"} Feb 16 11:13:14 crc kubenswrapper[4949]: I0216 11:13:14.388747 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2ckdb" event={"ID":"c3335bb3-fadb-4f0f-bf0f-5632510a5a06","Type":"ContainerStarted","Data":"87b3e2d4809ce614bb072f59ef82972646dc3c6dfc7bbf00532fb67246df15d6"} Feb 16 11:13:14 crc kubenswrapper[4949]: I0216 11:13:14.391981 4949 generic.go:334] "Generic (PLEG): container finished" podID="daf37823-eac2-462d-82ff-b54b8d3aaccd" containerID="8de06f6ecb36286dd66e3bd0af17ab6d7f4275af35f947bd9afa8a67629cd495" exitCode=0 Feb 16 11:13:14 crc kubenswrapper[4949]: I0216 11:13:14.392186 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8lxg9" event={"ID":"daf37823-eac2-462d-82ff-b54b8d3aaccd","Type":"ContainerDied","Data":"8de06f6ecb36286dd66e3bd0af17ab6d7f4275af35f947bd9afa8a67629cd495"} Feb 16 11:13:14 crc kubenswrapper[4949]: I0216 11:13:14.411786 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-s6hs5" podStartSLOduration=3.448711711 podStartE2EDuration="5.41176621s" podCreationTimestamp="2026-02-16 11:13:09 +0000 UTC" firstStartedPulling="2026-02-16 11:13:10.312347031 +0000 UTC m=+379.941681196" lastFinishedPulling="2026-02-16 11:13:12.27540153 +0000 UTC m=+381.904735695" observedRunningTime="2026-02-16 11:13:13.428386664 +0000 UTC m=+383.057720839" watchObservedRunningTime="2026-02-16 11:13:14.41176621 +0000 UTC m=+384.041100375" Feb 16 11:13:14 crc 
Feb 16 11:13:15 crc kubenswrapper[4949]: I0216 11:13:15.400470 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8lxg9" event={"ID":"daf37823-eac2-462d-82ff-b54b8d3aaccd","Type":"ContainerStarted","Data":"08c9c7a0cd90c07853a1b3487f3ab25e7d6b3ac5a7edd5dfd966b6ef8d157fc6"}
Feb 16 11:13:19 crc kubenswrapper[4949]: I0216 11:13:19.230057 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-kp26g"
Feb 16 11:13:19 crc kubenswrapper[4949]: I0216 11:13:19.230647 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-kp26g"
Feb 16 11:13:19 crc kubenswrapper[4949]: I0216 11:13:19.283630 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-kp26g"
Feb 16 11:13:19 crc kubenswrapper[4949]: I0216 11:13:19.309340 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-8lxg9" podStartSLOduration=5.880090086 podStartE2EDuration="8.309322123s" podCreationTimestamp="2026-02-16 11:13:11 +0000 UTC" firstStartedPulling="2026-02-16 11:13:12.34406473 +0000 UTC m=+381.973398895" lastFinishedPulling="2026-02-16 11:13:14.773296747 +0000 UTC m=+384.402630932" observedRunningTime="2026-02-16 11:13:15.421852955 +0000 UTC m=+385.051187120" watchObservedRunningTime="2026-02-16 11:13:19.309322123 +0000 UTC m=+388.938656288"
Feb 16 11:13:19 crc kubenswrapper[4949]: I0216 11:13:19.394974 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-s6hs5"
Feb 16 11:13:19 crc kubenswrapper[4949]: I0216 11:13:19.395398 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-s6hs5"
Feb 16 11:13:19 crc kubenswrapper[4949]: I0216 11:13:19.458491 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-s6hs5"
Feb 16 11:13:19 crc kubenswrapper[4949]: I0216 11:13:19.475327 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-kp26g"
Feb 16 11:13:20 crc kubenswrapper[4949]: I0216 11:13:20.487770 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-s6hs5"
Feb 16 11:13:21 crc kubenswrapper[4949]: I0216 11:13:21.618806 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-8lxg9"
Feb 16 11:13:21 crc kubenswrapper[4949]: I0216 11:13:21.619247 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-8lxg9"
Feb 16 11:13:21 crc kubenswrapper[4949]: I0216 11:13:21.666590 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-8lxg9"
Feb 16 11:13:21 crc kubenswrapper[4949]: I0216 11:13:21.779546 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-2ckdb"
Feb 16 11:13:21 crc kubenswrapper[4949]: I0216 11:13:21.779968 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-2ckdb"
Feb 16 11:13:21 crc kubenswrapper[4949]: I0216 11:13:21.824541 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-2ckdb"
Feb 16 11:13:22 crc kubenswrapper[4949]: I0216 11:13:22.489100 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-2ckdb"
Feb 16 11:13:22 crc kubenswrapper[4949]: I0216 11:13:22.492986 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-8lxg9"
Feb 16 11:13:30 crc kubenswrapper[4949]: I0216 11:13:30.100731 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-ldkbl"
Feb 16 11:13:30 crc kubenswrapper[4949]: I0216 11:13:30.176659 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-kzx7x"]
Feb 16 11:13:34 crc kubenswrapper[4949]: I0216 11:13:34.550148 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Feb 16 11:13:34 crc kubenswrapper[4949]: I0216 11:13:34.550844 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Need to start a new one" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-28f7w" Feb 16 11:13:40 crc kubenswrapper[4949]: I0216 11:13:40.650863 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"cluster-monitoring-operator-dockercfg-wwt9l" Feb 16 11:13:40 crc kubenswrapper[4949]: I0216 11:13:40.650919 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"openshift-service-ca.crt" Feb 16 11:13:40 crc kubenswrapper[4949]: I0216 11:13:40.656910 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"kube-root-ca.crt" Feb 16 11:13:40 crc kubenswrapper[4949]: I0216 11:13:40.656971 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"cluster-monitoring-operator-tls" Feb 16 11:13:40 crc kubenswrapper[4949]: I0216 11:13:40.657713 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"telemetry-config" Feb 16 11:13:40 crc kubenswrapper[4949]: I0216 11:13:40.662509 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/cluster-monitoring-operator-6d5b84845-28f7w"] Feb 16 11:13:40 crc kubenswrapper[4949]: I0216 11:13:40.781457 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p6jmp\" (UniqueName: \"kubernetes.io/projected/05467dda-f90f-4bdf-bc54-232c924152cd-kube-api-access-p6jmp\") pod \"cluster-monitoring-operator-6d5b84845-28f7w\" (UID: \"05467dda-f90f-4bdf-bc54-232c924152cd\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-28f7w" Feb 16 11:13:40 crc kubenswrapper[4949]: I0216 11:13:40.781555 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cluster-monitoring-operator-tls\" (UniqueName: \"kubernetes.io/secret/05467dda-f90f-4bdf-bc54-232c924152cd-cluster-monitoring-operator-tls\") pod \"cluster-monitoring-operator-6d5b84845-28f7w\" (UID: \"05467dda-f90f-4bdf-bc54-232c924152cd\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-28f7w" Feb 16 11:13:40 crc kubenswrapper[4949]: I0216 11:13:40.781921 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-config\" (UniqueName: \"kubernetes.io/configmap/05467dda-f90f-4bdf-bc54-232c924152cd-telemetry-config\") pod \"cluster-monitoring-operator-6d5b84845-28f7w\" (UID: \"05467dda-f90f-4bdf-bc54-232c924152cd\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-28f7w" Feb 16 11:13:40 crc kubenswrapper[4949]: I0216 11:13:40.883254 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p6jmp\" (UniqueName: \"kubernetes.io/projected/05467dda-f90f-4bdf-bc54-232c924152cd-kube-api-access-p6jmp\") pod \"cluster-monitoring-operator-6d5b84845-28f7w\" (UID: \"05467dda-f90f-4bdf-bc54-232c924152cd\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-28f7w" Feb 16 11:13:40 crc kubenswrapper[4949]: I0216 11:13:40.883347 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cluster-monitoring-operator-tls\" (UniqueName: \"kubernetes.io/secret/05467dda-f90f-4bdf-bc54-232c924152cd-cluster-monitoring-operator-tls\") pod \"cluster-monitoring-operator-6d5b84845-28f7w\" (UID: \"05467dda-f90f-4bdf-bc54-232c924152cd\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-28f7w" Feb 16 11:13:40 crc kubenswrapper[4949]: I0216 
11:13:40.883411 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-config\" (UniqueName: \"kubernetes.io/configmap/05467dda-f90f-4bdf-bc54-232c924152cd-telemetry-config\") pod \"cluster-monitoring-operator-6d5b84845-28f7w\" (UID: \"05467dda-f90f-4bdf-bc54-232c924152cd\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-28f7w" Feb 16 11:13:40 crc kubenswrapper[4949]: I0216 11:13:40.885258 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-config\" (UniqueName: \"kubernetes.io/configmap/05467dda-f90f-4bdf-bc54-232c924152cd-telemetry-config\") pod \"cluster-monitoring-operator-6d5b84845-28f7w\" (UID: \"05467dda-f90f-4bdf-bc54-232c924152cd\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-28f7w" Feb 16 11:13:40 crc kubenswrapper[4949]: I0216 11:13:40.896581 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cluster-monitoring-operator-tls\" (UniqueName: \"kubernetes.io/secret/05467dda-f90f-4bdf-bc54-232c924152cd-cluster-monitoring-operator-tls\") pod \"cluster-monitoring-operator-6d5b84845-28f7w\" (UID: \"05467dda-f90f-4bdf-bc54-232c924152cd\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-28f7w" Feb 16 11:13:40 crc kubenswrapper[4949]: I0216 11:13:40.912968 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p6jmp\" (UniqueName: \"kubernetes.io/projected/05467dda-f90f-4bdf-bc54-232c924152cd-kube-api-access-p6jmp\") pod \"cluster-monitoring-operator-6d5b84845-28f7w\" (UID: \"05467dda-f90f-4bdf-bc54-232c924152cd\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-28f7w" Feb 16 11:13:40 crc kubenswrapper[4949]: I0216 11:13:40.971613 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-28f7w" Feb 16 11:13:41 crc kubenswrapper[4949]: I0216 11:13:41.449620 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/cluster-monitoring-operator-6d5b84845-28f7w"] Feb 16 11:13:41 crc kubenswrapper[4949]: I0216 11:13:41.568353 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-28f7w" event={"ID":"05467dda-f90f-4bdf-bc54-232c924152cd","Type":"ContainerStarted","Data":"0d3a12512c17035d2473431b94d7e3efbfd0a734b72ff841fc587b4bbeeab52a"} Feb 16 11:13:45 crc kubenswrapper[4949]: I0216 11:13:45.554049 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-8ttcj"] Feb 16 11:13:45 crc kubenswrapper[4949]: I0216 11:13:45.555333 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-8ttcj" Feb 16 11:13:45 crc kubenswrapper[4949]: I0216 11:13:45.568816 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-admission-webhook-tls" Feb 16 11:13:45 crc kubenswrapper[4949]: I0216 11:13:45.568997 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-admission-webhook-dockercfg-brmsj" Feb 16 11:13:45 crc kubenswrapper[4949]: I0216 11:13:45.575619 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-8ttcj"] Feb 16 11:13:45 crc kubenswrapper[4949]: I0216 11:13:45.594350 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-28f7w" event={"ID":"05467dda-f90f-4bdf-bc54-232c924152cd","Type":"ContainerStarted","Data":"45b1b2762676c6c8ae5c95f51a8952d9d29d13e30c5fb239b2bcfc5f10e0c03d"} Feb 16 11:13:45 crc kubenswrapper[4949]: I0216 11:13:45.613013 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-28f7w" podStartSLOduration=2.176761961 podStartE2EDuration="5.612986996s" podCreationTimestamp="2026-02-16 11:13:40 +0000 UTC" firstStartedPulling="2026-02-16 11:13:41.465912671 +0000 UTC m=+411.095246836" lastFinishedPulling="2026-02-16 11:13:44.902137706 +0000 UTC m=+414.531471871" observedRunningTime="2026-02-16 11:13:45.610344555 +0000 UTC m=+415.239678720" watchObservedRunningTime="2026-02-16 11:13:45.612986996 +0000 UTC m=+415.242321161" Feb 16 11:13:45 crc kubenswrapper[4949]: I0216 11:13:45.661739 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-certificates\" (UniqueName: \"kubernetes.io/secret/a8b6c585-e0d8-4e6a-bb20-95a113e810b1-tls-certificates\") pod \"prometheus-operator-admission-webhook-f54c54754-8ttcj\" (UID: \"a8b6c585-e0d8-4e6a-bb20-95a113e810b1\") " pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-8ttcj" Feb 16 11:13:45 crc kubenswrapper[4949]: I0216 11:13:45.763030 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-certificates\" (UniqueName: \"kubernetes.io/secret/a8b6c585-e0d8-4e6a-bb20-95a113e810b1-tls-certificates\") pod \"prometheus-operator-admission-webhook-f54c54754-8ttcj\" (UID: \"a8b6c585-e0d8-4e6a-bb20-95a113e810b1\") " pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-8ttcj" Feb 16 11:13:45 crc kubenswrapper[4949]: I0216 11:13:45.770925 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-certificates\" (UniqueName: \"kubernetes.io/secret/a8b6c585-e0d8-4e6a-bb20-95a113e810b1-tls-certificates\") pod \"prometheus-operator-admission-webhook-f54c54754-8ttcj\" (UID: \"a8b6c585-e0d8-4e6a-bb20-95a113e810b1\") " pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-8ttcj" Feb 16 11:13:45 crc kubenswrapper[4949]: I0216 11:13:45.870377 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-8ttcj" Feb 16 11:13:46 crc kubenswrapper[4949]: I0216 11:13:46.120324 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-8ttcj"] Feb 16 11:13:46 crc kubenswrapper[4949]: I0216 11:13:46.604650 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-8ttcj" event={"ID":"a8b6c585-e0d8-4e6a-bb20-95a113e810b1","Type":"ContainerStarted","Data":"ebd349ed15b2efad9789130ef90cee87e5962f853b98923ef0f2f257d5ebdc4e"} Feb 16 11:13:48 crc kubenswrapper[4949]: I0216 11:13:48.651917 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-8ttcj" event={"ID":"a8b6c585-e0d8-4e6a-bb20-95a113e810b1","Type":"ContainerStarted","Data":"5983c6e346e3117145d774ebfd72dcfc2743ffb33787009ceda4df760bfa9705"} Feb 16 11:13:48 crc kubenswrapper[4949]: I0216 11:13:48.652299 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-8ttcj" Feb 16 11:13:48 crc kubenswrapper[4949]: I0216 11:13:48.656628 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-8ttcj" Feb 16 11:13:48 crc kubenswrapper[4949]: I0216 11:13:48.666164 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-8ttcj" podStartSLOduration=2.22566766 podStartE2EDuration="3.666138872s" podCreationTimestamp="2026-02-16 11:13:45 +0000 UTC" firstStartedPulling="2026-02-16 11:13:46.129842961 +0000 UTC m=+415.759177126" lastFinishedPulling="2026-02-16 11:13:47.570314173 +0000 UTC m=+417.199648338" observedRunningTime="2026-02-16 11:13:48.664281213 +0000 UTC m=+418.293615388" watchObservedRunningTime="2026-02-16 11:13:48.666138872 +0000 UTC m=+418.295473037" Feb 16 11:13:49 crc kubenswrapper[4949]: I0216 11:13:49.619433 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/prometheus-operator-db54df47d-jm5h6"] Feb 16 11:13:49 crc kubenswrapper[4949]: I0216 11:13:49.620550 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/prometheus-operator-db54df47d-jm5h6" Feb 16 11:13:49 crc kubenswrapper[4949]: I0216 11:13:49.622517 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-tls" Feb 16 11:13:49 crc kubenswrapper[4949]: I0216 11:13:49.624422 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-kube-rbac-proxy-config" Feb 16 11:13:49 crc kubenswrapper[4949]: I0216 11:13:49.624848 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"metrics-client-ca" Feb 16 11:13:49 crc kubenswrapper[4949]: I0216 11:13:49.625329 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-dockercfg-b5r67" Feb 16 11:13:49 crc kubenswrapper[4949]: I0216 11:13:49.644900 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-operator-db54df47d-jm5h6"] Feb 16 11:13:49 crc kubenswrapper[4949]: I0216 11:13:49.728360 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-operator-tls\" (UniqueName: \"kubernetes.io/secret/69ced23e-6719-4914-952b-ce3969461fd8-prometheus-operator-tls\") pod \"prometheus-operator-db54df47d-jm5h6\" (UID: \"69ced23e-6719-4914-952b-ce3969461fd8\") " pod="openshift-monitoring/prometheus-operator-db54df47d-jm5h6" Feb 16 11:13:49 crc kubenswrapper[4949]: I0216 11:13:49.728431 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jtp9v\" (UniqueName: \"kubernetes.io/projected/69ced23e-6719-4914-952b-ce3969461fd8-kube-api-access-jtp9v\") pod \"prometheus-operator-db54df47d-jm5h6\" (UID: \"69ced23e-6719-4914-952b-ce3969461fd8\") " pod="openshift-monitoring/prometheus-operator-db54df47d-jm5h6" Feb 16 11:13:49 crc kubenswrapper[4949]: I0216 11:13:49.728462 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/69ced23e-6719-4914-952b-ce3969461fd8-metrics-client-ca\") pod \"prometheus-operator-db54df47d-jm5h6\" (UID: \"69ced23e-6719-4914-952b-ce3969461fd8\") " pod="openshift-monitoring/prometheus-operator-db54df47d-jm5h6" Feb 16 11:13:49 crc kubenswrapper[4949]: I0216 11:13:49.728487 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-operator-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/69ced23e-6719-4914-952b-ce3969461fd8-prometheus-operator-kube-rbac-proxy-config\") pod \"prometheus-operator-db54df47d-jm5h6\" (UID: \"69ced23e-6719-4914-952b-ce3969461fd8\") " pod="openshift-monitoring/prometheus-operator-db54df47d-jm5h6" Feb 16 11:13:49 crc kubenswrapper[4949]: I0216 11:13:49.829803 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-operator-tls\" (UniqueName: \"kubernetes.io/secret/69ced23e-6719-4914-952b-ce3969461fd8-prometheus-operator-tls\") pod \"prometheus-operator-db54df47d-jm5h6\" (UID: \"69ced23e-6719-4914-952b-ce3969461fd8\") " pod="openshift-monitoring/prometheus-operator-db54df47d-jm5h6" Feb 16 11:13:49 crc kubenswrapper[4949]: I0216 11:13:49.829932 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jtp9v\" (UniqueName: \"kubernetes.io/projected/69ced23e-6719-4914-952b-ce3969461fd8-kube-api-access-jtp9v\") pod 
\"prometheus-operator-db54df47d-jm5h6\" (UID: \"69ced23e-6719-4914-952b-ce3969461fd8\") " pod="openshift-monitoring/prometheus-operator-db54df47d-jm5h6" Feb 16 11:13:49 crc kubenswrapper[4949]: I0216 11:13:49.830025 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/69ced23e-6719-4914-952b-ce3969461fd8-metrics-client-ca\") pod \"prometheus-operator-db54df47d-jm5h6\" (UID: \"69ced23e-6719-4914-952b-ce3969461fd8\") " pod="openshift-monitoring/prometheus-operator-db54df47d-jm5h6" Feb 16 11:13:49 crc kubenswrapper[4949]: I0216 11:13:49.830070 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-operator-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/69ced23e-6719-4914-952b-ce3969461fd8-prometheus-operator-kube-rbac-proxy-config\") pod \"prometheus-operator-db54df47d-jm5h6\" (UID: \"69ced23e-6719-4914-952b-ce3969461fd8\") " pod="openshift-monitoring/prometheus-operator-db54df47d-jm5h6" Feb 16 11:13:49 crc kubenswrapper[4949]: I0216 11:13:49.831679 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/69ced23e-6719-4914-952b-ce3969461fd8-metrics-client-ca\") pod \"prometheus-operator-db54df47d-jm5h6\" (UID: \"69ced23e-6719-4914-952b-ce3969461fd8\") " pod="openshift-monitoring/prometheus-operator-db54df47d-jm5h6" Feb 16 11:13:49 crc kubenswrapper[4949]: I0216 11:13:49.838217 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-operator-tls\" (UniqueName: \"kubernetes.io/secret/69ced23e-6719-4914-952b-ce3969461fd8-prometheus-operator-tls\") pod \"prometheus-operator-db54df47d-jm5h6\" (UID: \"69ced23e-6719-4914-952b-ce3969461fd8\") " pod="openshift-monitoring/prometheus-operator-db54df47d-jm5h6" Feb 16 11:13:49 crc kubenswrapper[4949]: I0216 11:13:49.838397 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-operator-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/69ced23e-6719-4914-952b-ce3969461fd8-prometheus-operator-kube-rbac-proxy-config\") pod \"prometheus-operator-db54df47d-jm5h6\" (UID: \"69ced23e-6719-4914-952b-ce3969461fd8\") " pod="openshift-monitoring/prometheus-operator-db54df47d-jm5h6" Feb 16 11:13:49 crc kubenswrapper[4949]: I0216 11:13:49.852941 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jtp9v\" (UniqueName: \"kubernetes.io/projected/69ced23e-6719-4914-952b-ce3969461fd8-kube-api-access-jtp9v\") pod \"prometheus-operator-db54df47d-jm5h6\" (UID: \"69ced23e-6719-4914-952b-ce3969461fd8\") " pod="openshift-monitoring/prometheus-operator-db54df47d-jm5h6" Feb 16 11:13:49 crc kubenswrapper[4949]: I0216 11:13:49.936318 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/prometheus-operator-db54df47d-jm5h6" Feb 16 11:13:50 crc kubenswrapper[4949]: I0216 11:13:50.142987 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-operator-db54df47d-jm5h6"] Feb 16 11:13:50 crc kubenswrapper[4949]: I0216 11:13:50.691972 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-db54df47d-jm5h6" event={"ID":"69ced23e-6719-4914-952b-ce3969461fd8","Type":"ContainerStarted","Data":"9fb20711acc2b35c16455d794bafc47be3a1efb0040413d213761176cf5860f7"} Feb 16 11:13:52 crc kubenswrapper[4949]: I0216 11:13:52.707819 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-db54df47d-jm5h6" event={"ID":"69ced23e-6719-4914-952b-ce3969461fd8","Type":"ContainerStarted","Data":"8d78d0a2d103652f55246d8e86a47c26af9bee093386ce9dd4d194924abe15ff"} Feb 16 11:13:53 crc kubenswrapper[4949]: I0216 11:13:53.716489 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-db54df47d-jm5h6" event={"ID":"69ced23e-6719-4914-952b-ce3969461fd8","Type":"ContainerStarted","Data":"28f02b3e1a89292077d43dab3ca182ac5d935a951d5db2bffe1044bc5f235b4e"} Feb 16 11:13:53 crc kubenswrapper[4949]: I0216 11:13:53.737162 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/prometheus-operator-db54df47d-jm5h6" podStartSLOduration=2.397471588 podStartE2EDuration="4.737050001s" podCreationTimestamp="2026-02-16 11:13:49 +0000 UTC" firstStartedPulling="2026-02-16 11:13:50.15807137 +0000 UTC m=+419.787405535" lastFinishedPulling="2026-02-16 11:13:52.497649773 +0000 UTC m=+422.126983948" observedRunningTime="2026-02-16 11:13:53.731657576 +0000 UTC m=+423.360991741" watchObservedRunningTime="2026-02-16 11:13:53.737050001 +0000 UTC m=+423.366384166" Feb 16 11:13:55 crc kubenswrapper[4949]: I0216 11:13:55.223719 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" podUID="1d09a8d4-1225-4b5a-94ee-149b7c0fe01e" containerName="registry" containerID="cri-o://699f54fd17fb88fc82829c56f831b8594c14a8a6ff7f231bc79278ebde3963d5" gracePeriod=30 Feb 16 11:13:55 crc kubenswrapper[4949]: I0216 11:13:55.620971 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:13:55 crc kubenswrapper[4949]: I0216 11:13:55.725929 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/1d09a8d4-1225-4b5a-94ee-149b7c0fe01e-installation-pull-secrets\") pod \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " Feb 16 11:13:55 crc kubenswrapper[4949]: I0216 11:13:55.726002 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/1d09a8d4-1225-4b5a-94ee-149b7c0fe01e-registry-certificates\") pod \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " Feb 16 11:13:55 crc kubenswrapper[4949]: I0216 11:13:55.726057 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1d09a8d4-1225-4b5a-94ee-149b7c0fe01e-trusted-ca\") pod \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " Feb 16 11:13:55 crc kubenswrapper[4949]: I0216 11:13:55.726105 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/1d09a8d4-1225-4b5a-94ee-149b7c0fe01e-bound-sa-token\") pod \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " Feb 16 11:13:55 crc kubenswrapper[4949]: I0216 11:13:55.726282 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " Feb 16 11:13:55 crc kubenswrapper[4949]: I0216 11:13:55.726339 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5xv55\" (UniqueName: \"kubernetes.io/projected/1d09a8d4-1225-4b5a-94ee-149b7c0fe01e-kube-api-access-5xv55\") pod \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " Feb 16 11:13:55 crc kubenswrapper[4949]: I0216 11:13:55.726398 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/1d09a8d4-1225-4b5a-94ee-149b7c0fe01e-registry-tls\") pod \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " Feb 16 11:13:55 crc kubenswrapper[4949]: I0216 11:13:55.726434 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/1d09a8d4-1225-4b5a-94ee-149b7c0fe01e-ca-trust-extracted\") pod \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\" (UID: \"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e\") " Feb 16 11:13:55 crc kubenswrapper[4949]: I0216 11:13:55.729966 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1d09a8d4-1225-4b5a-94ee-149b7c0fe01e-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e"). InnerVolumeSpecName "registry-certificates". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:13:55 crc kubenswrapper[4949]: I0216 11:13:55.731009 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1d09a8d4-1225-4b5a-94ee-149b7c0fe01e-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:13:55 crc kubenswrapper[4949]: I0216 11:13:55.746760 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d09a8d4-1225-4b5a-94ee-149b7c0fe01e-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:13:55 crc kubenswrapper[4949]: I0216 11:13:55.748263 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1d09a8d4-1225-4b5a-94ee-149b7c0fe01e-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:13:55 crc kubenswrapper[4949]: I0216 11:13:55.748529 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d09a8d4-1225-4b5a-94ee-149b7c0fe01e-kube-api-access-5xv55" (OuterVolumeSpecName: "kube-api-access-5xv55") pod "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e"). InnerVolumeSpecName "kube-api-access-5xv55". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:13:55 crc kubenswrapper[4949]: I0216 11:13:55.748823 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d09a8d4-1225-4b5a-94ee-149b7c0fe01e-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:13:55 crc kubenswrapper[4949]: I0216 11:13:55.748927 4949 generic.go:334] "Generic (PLEG): container finished" podID="1d09a8d4-1225-4b5a-94ee-149b7c0fe01e" containerID="699f54fd17fb88fc82829c56f831b8594c14a8a6ff7f231bc79278ebde3963d5" exitCode=0 Feb 16 11:13:55 crc kubenswrapper[4949]: I0216 11:13:55.748965 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" event={"ID":"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e","Type":"ContainerDied","Data":"699f54fd17fb88fc82829c56f831b8594c14a8a6ff7f231bc79278ebde3963d5"} Feb 16 11:13:55 crc kubenswrapper[4949]: I0216 11:13:55.748993 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" event={"ID":"1d09a8d4-1225-4b5a-94ee-149b7c0fe01e","Type":"ContainerDied","Data":"8213291aafa07829d745958cf0e5d25cb120f2ba76c934aad396faec08f84684"} Feb 16 11:13:55 crc kubenswrapper[4949]: I0216 11:13:55.749008 4949 scope.go:117] "RemoveContainer" containerID="699f54fd17fb88fc82829c56f831b8594c14a8a6ff7f231bc79278ebde3963d5" Feb 16 11:13:55 crc kubenswrapper[4949]: I0216 11:13:55.749188 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-kzx7x" Feb 16 11:13:55 crc kubenswrapper[4949]: I0216 11:13:55.765481 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d09a8d4-1225-4b5a-94ee-149b7c0fe01e-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:13:55 crc kubenswrapper[4949]: I0216 11:13:55.784640 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e" (UID: "1d09a8d4-1225-4b5a-94ee-149b7c0fe01e"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Feb 16 11:13:55 crc kubenswrapper[4949]: I0216 11:13:55.789847 4949 scope.go:117] "RemoveContainer" containerID="699f54fd17fb88fc82829c56f831b8594c14a8a6ff7f231bc79278ebde3963d5" Feb 16 11:13:55 crc kubenswrapper[4949]: E0216 11:13:55.791863 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"699f54fd17fb88fc82829c56f831b8594c14a8a6ff7f231bc79278ebde3963d5\": container with ID starting with 699f54fd17fb88fc82829c56f831b8594c14a8a6ff7f231bc79278ebde3963d5 not found: ID does not exist" containerID="699f54fd17fb88fc82829c56f831b8594c14a8a6ff7f231bc79278ebde3963d5" Feb 16 11:13:55 crc kubenswrapper[4949]: I0216 11:13:55.791930 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"699f54fd17fb88fc82829c56f831b8594c14a8a6ff7f231bc79278ebde3963d5"} err="failed to get container status \"699f54fd17fb88fc82829c56f831b8594c14a8a6ff7f231bc79278ebde3963d5\": rpc error: code = NotFound desc = could not find container \"699f54fd17fb88fc82829c56f831b8594c14a8a6ff7f231bc79278ebde3963d5\": container with ID starting with 699f54fd17fb88fc82829c56f831b8594c14a8a6ff7f231bc79278ebde3963d5 not found: ID does not exist" Feb 16 11:13:55 crc kubenswrapper[4949]: I0216 11:13:55.827560 4949 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/1d09a8d4-1225-4b5a-94ee-149b7c0fe01e-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Feb 16 11:13:55 crc kubenswrapper[4949]: I0216 11:13:55.827604 4949 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/1d09a8d4-1225-4b5a-94ee-149b7c0fe01e-registry-certificates\") on node \"crc\" DevicePath \"\"" Feb 16 11:13:55 crc kubenswrapper[4949]: I0216 11:13:55.827617 4949 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1d09a8d4-1225-4b5a-94ee-149b7c0fe01e-trusted-ca\") on node \"crc\" DevicePath \"\"" Feb 16 11:13:55 crc kubenswrapper[4949]: I0216 11:13:55.827627 4949 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/1d09a8d4-1225-4b5a-94ee-149b7c0fe01e-bound-sa-token\") on node \"crc\" DevicePath \"\"" Feb 16 11:13:55 crc kubenswrapper[4949]: I0216 11:13:55.827636 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5xv55\" (UniqueName: 
\"kubernetes.io/projected/1d09a8d4-1225-4b5a-94ee-149b7c0fe01e-kube-api-access-5xv55\") on node \"crc\" DevicePath \"\"" Feb 16 11:13:55 crc kubenswrapper[4949]: I0216 11:13:55.827645 4949 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/1d09a8d4-1225-4b5a-94ee-149b7c0fe01e-registry-tls\") on node \"crc\" DevicePath \"\"" Feb 16 11:13:55 crc kubenswrapper[4949]: I0216 11:13:55.827653 4949 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/1d09a8d4-1225-4b5a-94ee-149b7c0fe01e-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.024331 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/node-exporter-zfqtx"] Feb 16 11:13:56 crc kubenswrapper[4949]: E0216 11:13:56.024684 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d09a8d4-1225-4b5a-94ee-149b7c0fe01e" containerName="registry" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.024700 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d09a8d4-1225-4b5a-94ee-149b7c0fe01e" containerName="registry" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.024826 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="1d09a8d4-1225-4b5a-94ee-149b7c0fe01e" containerName="registry" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.025736 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/node-exporter-zfqtx" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.028094 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"node-exporter-dockercfg-v2k72" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.029060 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"node-exporter-kube-rbac-proxy-config" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.029314 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"node-exporter-tls" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.043567 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/openshift-state-metrics-566fddb674-h48nm"] Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.046297 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/openshift-state-metrics-566fddb674-h48nm" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.054892 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"openshift-state-metrics-dockercfg-tjq2m" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.055463 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"openshift-state-metrics-tls" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.056191 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"openshift-state-metrics-kube-rbac-proxy-config" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.060012 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/openshift-state-metrics-566fddb674-h48nm"] Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.089365 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/kube-state-metrics-777cb5bd5d-2m5bf"] Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.091212 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-2m5bf" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.094678 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-state-metrics-kube-rbac-proxy-config" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.095374 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-state-metrics-dockercfg-rf8hs" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.095569 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"kube-state-metrics-custom-resource-state-configmap" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.095699 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-state-metrics-tls" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.119224 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/kube-state-metrics-777cb5bd5d-2m5bf"] Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.132004 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"root\" (UniqueName: \"kubernetes.io/host-path/fb73ccdd-72b5-47e0-bb99-9147c64e783c-root\") pod \"node-exporter-zfqtx\" (UID: \"fb73ccdd-72b5-47e0-bb99-9147c64e783c\") " pod="openshift-monitoring/node-exporter-zfqtx" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.136116 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-exporter-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/fb73ccdd-72b5-47e0-bb99-9147c64e783c-node-exporter-kube-rbac-proxy-config\") pod \"node-exporter-zfqtx\" (UID: \"fb73ccdd-72b5-47e0-bb99-9147c64e783c\") " pod="openshift-monitoring/node-exporter-zfqtx" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.136166 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/fb73ccdd-72b5-47e0-bb99-9147c64e783c-sys\") pod \"node-exporter-zfqtx\" (UID: \"fb73ccdd-72b5-47e0-bb99-9147c64e783c\") " pod="openshift-monitoring/node-exporter-zfqtx" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.136216 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"node-exporter-textfile\" (UniqueName: \"kubernetes.io/empty-dir/fb73ccdd-72b5-47e0-bb99-9147c64e783c-node-exporter-textfile\") pod \"node-exporter-zfqtx\" (UID: \"fb73ccdd-72b5-47e0-bb99-9147c64e783c\") " pod="openshift-monitoring/node-exporter-zfqtx" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.136273 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-exporter-tls\" (UniqueName: \"kubernetes.io/secret/fb73ccdd-72b5-47e0-bb99-9147c64e783c-node-exporter-tls\") pod \"node-exporter-zfqtx\" (UID: \"fb73ccdd-72b5-47e0-bb99-9147c64e783c\") " pod="openshift-monitoring/node-exporter-zfqtx" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.136313 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-spqvk\" (UniqueName: \"kubernetes.io/projected/fb73ccdd-72b5-47e0-bb99-9147c64e783c-kube-api-access-spqvk\") pod \"node-exporter-zfqtx\" (UID: \"fb73ccdd-72b5-47e0-bb99-9147c64e783c\") " pod="openshift-monitoring/node-exporter-zfqtx" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.136358 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-exporter-wtmp\" (UniqueName: \"kubernetes.io/host-path/fb73ccdd-72b5-47e0-bb99-9147c64e783c-node-exporter-wtmp\") pod \"node-exporter-zfqtx\" (UID: \"fb73ccdd-72b5-47e0-bb99-9147c64e783c\") " pod="openshift-monitoring/node-exporter-zfqtx" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.136378 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/fb73ccdd-72b5-47e0-bb99-9147c64e783c-metrics-client-ca\") pod \"node-exporter-zfqtx\" (UID: \"fb73ccdd-72b5-47e0-bb99-9147c64e783c\") " pod="openshift-monitoring/node-exporter-zfqtx" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.173719 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-kzx7x"] Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.182916 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-kzx7x"] Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.238332 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"volume-directive-shadow\" (UniqueName: \"kubernetes.io/empty-dir/01617150-585e-4645-88a1-6edd24398621-volume-directive-shadow\") pod \"kube-state-metrics-777cb5bd5d-2m5bf\" (UID: \"01617150-585e-4645-88a1-6edd24398621\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-2m5bf" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.238431 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-exporter-textfile\" (UniqueName: \"kubernetes.io/empty-dir/fb73ccdd-72b5-47e0-bb99-9147c64e783c-node-exporter-textfile\") pod \"node-exporter-zfqtx\" (UID: \"fb73ccdd-72b5-47e0-bb99-9147c64e783c\") " pod="openshift-monitoring/node-exporter-zfqtx" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.238468 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/fb73ccdd-72b5-47e0-bb99-9147c64e783c-sys\") pod \"node-exporter-zfqtx\" (UID: \"fb73ccdd-72b5-47e0-bb99-9147c64e783c\") " pod="openshift-monitoring/node-exporter-zfqtx" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.238516 4949 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-custom-resource-state-configmap\" (UniqueName: \"kubernetes.io/configmap/01617150-585e-4645-88a1-6edd24398621-kube-state-metrics-custom-resource-state-configmap\") pod \"kube-state-metrics-777cb5bd5d-2m5bf\" (UID: \"01617150-585e-4645-88a1-6edd24398621\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-2m5bf" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.238552 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kkdtq\" (UniqueName: \"kubernetes.io/projected/01617150-585e-4645-88a1-6edd24398621-kube-api-access-kkdtq\") pod \"kube-state-metrics-777cb5bd5d-2m5bf\" (UID: \"01617150-585e-4645-88a1-6edd24398621\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-2m5bf" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.238587 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/5d9368ad-8939-4e65-9184-9d221916bf56-openshift-state-metrics-kube-rbac-proxy-config\") pod \"openshift-state-metrics-566fddb674-h48nm\" (UID: \"5d9368ad-8939-4e65-9184-9d221916bf56\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-h48nm" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.238623 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-exporter-tls\" (UniqueName: \"kubernetes.io/secret/fb73ccdd-72b5-47e0-bb99-9147c64e783c-node-exporter-tls\") pod \"node-exporter-zfqtx\" (UID: \"fb73ccdd-72b5-47e0-bb99-9147c64e783c\") " pod="openshift-monitoring/node-exporter-zfqtx" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.238655 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-spqvk\" (UniqueName: \"kubernetes.io/projected/fb73ccdd-72b5-47e0-bb99-9147c64e783c-kube-api-access-spqvk\") pod \"node-exporter-zfqtx\" (UID: \"fb73ccdd-72b5-47e0-bb99-9147c64e783c\") " pod="openshift-monitoring/node-exporter-zfqtx" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.238681 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-exporter-wtmp\" (UniqueName: \"kubernetes.io/host-path/fb73ccdd-72b5-47e0-bb99-9147c64e783c-node-exporter-wtmp\") pod \"node-exporter-zfqtx\" (UID: \"fb73ccdd-72b5-47e0-bb99-9147c64e783c\") " pod="openshift-monitoring/node-exporter-zfqtx" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.238705 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/fb73ccdd-72b5-47e0-bb99-9147c64e783c-metrics-client-ca\") pod \"node-exporter-zfqtx\" (UID: \"fb73ccdd-72b5-47e0-bb99-9147c64e783c\") " pod="openshift-monitoring/node-exporter-zfqtx" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.238756 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/5d9368ad-8939-4e65-9184-9d221916bf56-metrics-client-ca\") pod \"openshift-state-metrics-566fddb674-h48nm\" (UID: \"5d9368ad-8939-4e65-9184-9d221916bf56\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-h48nm" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.238780 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/01617150-585e-4645-88a1-6edd24398621-metrics-client-ca\") pod \"kube-state-metrics-777cb5bd5d-2m5bf\" (UID: \"01617150-585e-4645-88a1-6edd24398621\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-2m5bf" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.238808 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/01617150-585e-4645-88a1-6edd24398621-kube-state-metrics-tls\") pod \"kube-state-metrics-777cb5bd5d-2m5bf\" (UID: \"01617150-585e-4645-88a1-6edd24398621\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-2m5bf" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.238834 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/01617150-585e-4645-88a1-6edd24398621-kube-state-metrics-kube-rbac-proxy-config\") pod \"kube-state-metrics-777cb5bd5d-2m5bf\" (UID: \"01617150-585e-4645-88a1-6edd24398621\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-2m5bf" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.238861 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/5d9368ad-8939-4e65-9184-9d221916bf56-openshift-state-metrics-tls\") pod \"openshift-state-metrics-566fddb674-h48nm\" (UID: \"5d9368ad-8939-4e65-9184-9d221916bf56\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-h48nm" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.238892 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"root\" (UniqueName: \"kubernetes.io/host-path/fb73ccdd-72b5-47e0-bb99-9147c64e783c-root\") pod \"node-exporter-zfqtx\" (UID: \"fb73ccdd-72b5-47e0-bb99-9147c64e783c\") " pod="openshift-monitoring/node-exporter-zfqtx" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.238923 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-exporter-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/fb73ccdd-72b5-47e0-bb99-9147c64e783c-node-exporter-kube-rbac-proxy-config\") pod \"node-exporter-zfqtx\" (UID: \"fb73ccdd-72b5-47e0-bb99-9147c64e783c\") " pod="openshift-monitoring/node-exporter-zfqtx" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.238945 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xsqfk\" (UniqueName: \"kubernetes.io/projected/5d9368ad-8939-4e65-9184-9d221916bf56-kube-api-access-xsqfk\") pod \"openshift-state-metrics-566fddb674-h48nm\" (UID: \"5d9368ad-8939-4e65-9184-9d221916bf56\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-h48nm" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.239675 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-exporter-textfile\" (UniqueName: \"kubernetes.io/empty-dir/fb73ccdd-72b5-47e0-bb99-9147c64e783c-node-exporter-textfile\") pod \"node-exporter-zfqtx\" (UID: \"fb73ccdd-72b5-47e0-bb99-9147c64e783c\") " pod="openshift-monitoring/node-exporter-zfqtx" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.239770 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"root\" (UniqueName: \"kubernetes.io/host-path/fb73ccdd-72b5-47e0-bb99-9147c64e783c-root\") pod 
\"node-exporter-zfqtx\" (UID: \"fb73ccdd-72b5-47e0-bb99-9147c64e783c\") " pod="openshift-monitoring/node-exporter-zfqtx" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.239975 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/fb73ccdd-72b5-47e0-bb99-9147c64e783c-sys\") pod \"node-exporter-zfqtx\" (UID: \"fb73ccdd-72b5-47e0-bb99-9147c64e783c\") " pod="openshift-monitoring/node-exporter-zfqtx" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.243908 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/fb73ccdd-72b5-47e0-bb99-9147c64e783c-metrics-client-ca\") pod \"node-exporter-zfqtx\" (UID: \"fb73ccdd-72b5-47e0-bb99-9147c64e783c\") " pod="openshift-monitoring/node-exporter-zfqtx" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.244124 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-exporter-wtmp\" (UniqueName: \"kubernetes.io/host-path/fb73ccdd-72b5-47e0-bb99-9147c64e783c-node-exporter-wtmp\") pod \"node-exporter-zfqtx\" (UID: \"fb73ccdd-72b5-47e0-bb99-9147c64e783c\") " pod="openshift-monitoring/node-exporter-zfqtx" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.257943 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-exporter-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/fb73ccdd-72b5-47e0-bb99-9147c64e783c-node-exporter-kube-rbac-proxy-config\") pod \"node-exporter-zfqtx\" (UID: \"fb73ccdd-72b5-47e0-bb99-9147c64e783c\") " pod="openshift-monitoring/node-exporter-zfqtx" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.261548 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-exporter-tls\" (UniqueName: \"kubernetes.io/secret/fb73ccdd-72b5-47e0-bb99-9147c64e783c-node-exporter-tls\") pod \"node-exporter-zfqtx\" (UID: \"fb73ccdd-72b5-47e0-bb99-9147c64e783c\") " pod="openshift-monitoring/node-exporter-zfqtx" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.264727 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-spqvk\" (UniqueName: \"kubernetes.io/projected/fb73ccdd-72b5-47e0-bb99-9147c64e783c-kube-api-access-spqvk\") pod \"node-exporter-zfqtx\" (UID: \"fb73ccdd-72b5-47e0-bb99-9147c64e783c\") " pod="openshift-monitoring/node-exporter-zfqtx" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.347228 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-custom-resource-state-configmap\" (UniqueName: \"kubernetes.io/configmap/01617150-585e-4645-88a1-6edd24398621-kube-state-metrics-custom-resource-state-configmap\") pod \"kube-state-metrics-777cb5bd5d-2m5bf\" (UID: \"01617150-585e-4645-88a1-6edd24398621\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-2m5bf" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.347713 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kkdtq\" (UniqueName: \"kubernetes.io/projected/01617150-585e-4645-88a1-6edd24398621-kube-api-access-kkdtq\") pod \"kube-state-metrics-777cb5bd5d-2m5bf\" (UID: \"01617150-585e-4645-88a1-6edd24398621\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-2m5bf" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.347758 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-state-metrics-kube-rbac-proxy-config\" (UniqueName: 
\"kubernetes.io/secret/5d9368ad-8939-4e65-9184-9d221916bf56-openshift-state-metrics-kube-rbac-proxy-config\") pod \"openshift-state-metrics-566fddb674-h48nm\" (UID: \"5d9368ad-8939-4e65-9184-9d221916bf56\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-h48nm" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.347821 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/5d9368ad-8939-4e65-9184-9d221916bf56-metrics-client-ca\") pod \"openshift-state-metrics-566fddb674-h48nm\" (UID: \"5d9368ad-8939-4e65-9184-9d221916bf56\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-h48nm" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.347848 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/01617150-585e-4645-88a1-6edd24398621-metrics-client-ca\") pod \"kube-state-metrics-777cb5bd5d-2m5bf\" (UID: \"01617150-585e-4645-88a1-6edd24398621\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-2m5bf" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.347874 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/01617150-585e-4645-88a1-6edd24398621-kube-state-metrics-tls\") pod \"kube-state-metrics-777cb5bd5d-2m5bf\" (UID: \"01617150-585e-4645-88a1-6edd24398621\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-2m5bf" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.347892 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/01617150-585e-4645-88a1-6edd24398621-kube-state-metrics-kube-rbac-proxy-config\") pod \"kube-state-metrics-777cb5bd5d-2m5bf\" (UID: \"01617150-585e-4645-88a1-6edd24398621\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-2m5bf" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.347922 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/5d9368ad-8939-4e65-9184-9d221916bf56-openshift-state-metrics-tls\") pod \"openshift-state-metrics-566fddb674-h48nm\" (UID: \"5d9368ad-8939-4e65-9184-9d221916bf56\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-h48nm" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.347949 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xsqfk\" (UniqueName: \"kubernetes.io/projected/5d9368ad-8939-4e65-9184-9d221916bf56-kube-api-access-xsqfk\") pod \"openshift-state-metrics-566fddb674-h48nm\" (UID: \"5d9368ad-8939-4e65-9184-9d221916bf56\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-h48nm" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.347975 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"volume-directive-shadow\" (UniqueName: \"kubernetes.io/empty-dir/01617150-585e-4645-88a1-6edd24398621-volume-directive-shadow\") pod \"kube-state-metrics-777cb5bd5d-2m5bf\" (UID: \"01617150-585e-4645-88a1-6edd24398621\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-2m5bf" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.348750 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"volume-directive-shadow\" (UniqueName: 
\"kubernetes.io/empty-dir/01617150-585e-4645-88a1-6edd24398621-volume-directive-shadow\") pod \"kube-state-metrics-777cb5bd5d-2m5bf\" (UID: \"01617150-585e-4645-88a1-6edd24398621\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-2m5bf" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.349556 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/01617150-585e-4645-88a1-6edd24398621-metrics-client-ca\") pod \"kube-state-metrics-777cb5bd5d-2m5bf\" (UID: \"01617150-585e-4645-88a1-6edd24398621\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-2m5bf" Feb 16 11:13:56 crc kubenswrapper[4949]: E0216 11:13:56.350412 4949 secret.go:188] Couldn't get secret openshift-monitoring/openshift-state-metrics-tls: secret "openshift-state-metrics-tls" not found Feb 16 11:13:56 crc kubenswrapper[4949]: E0216 11:13:56.350529 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5d9368ad-8939-4e65-9184-9d221916bf56-openshift-state-metrics-tls podName:5d9368ad-8939-4e65-9184-9d221916bf56 nodeName:}" failed. No retries permitted until 2026-02-16 11:13:56.850504535 +0000 UTC m=+426.479838900 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "openshift-state-metrics-tls" (UniqueName: "kubernetes.io/secret/5d9368ad-8939-4e65-9184-9d221916bf56-openshift-state-metrics-tls") pod "openshift-state-metrics-566fddb674-h48nm" (UID: "5d9368ad-8939-4e65-9184-9d221916bf56") : secret "openshift-state-metrics-tls" not found Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.350570 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-custom-resource-state-configmap\" (UniqueName: \"kubernetes.io/configmap/01617150-585e-4645-88a1-6edd24398621-kube-state-metrics-custom-resource-state-configmap\") pod \"kube-state-metrics-777cb5bd5d-2m5bf\" (UID: \"01617150-585e-4645-88a1-6edd24398621\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-2m5bf" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.351787 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/5d9368ad-8939-4e65-9184-9d221916bf56-metrics-client-ca\") pod \"openshift-state-metrics-566fddb674-h48nm\" (UID: \"5d9368ad-8939-4e65-9184-9d221916bf56\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-h48nm" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.354439 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/5d9368ad-8939-4e65-9184-9d221916bf56-openshift-state-metrics-kube-rbac-proxy-config\") pod \"openshift-state-metrics-566fddb674-h48nm\" (UID: \"5d9368ad-8939-4e65-9184-9d221916bf56\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-h48nm" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.354923 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/node-exporter-zfqtx" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.358485 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/01617150-585e-4645-88a1-6edd24398621-kube-state-metrics-kube-rbac-proxy-config\") pod \"kube-state-metrics-777cb5bd5d-2m5bf\" (UID: \"01617150-585e-4645-88a1-6edd24398621\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-2m5bf" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.364151 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/01617150-585e-4645-88a1-6edd24398621-kube-state-metrics-tls\") pod \"kube-state-metrics-777cb5bd5d-2m5bf\" (UID: \"01617150-585e-4645-88a1-6edd24398621\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-2m5bf" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.379337 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kkdtq\" (UniqueName: \"kubernetes.io/projected/01617150-585e-4645-88a1-6edd24398621-kube-api-access-kkdtq\") pod \"kube-state-metrics-777cb5bd5d-2m5bf\" (UID: \"01617150-585e-4645-88a1-6edd24398621\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-2m5bf" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.388501 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xsqfk\" (UniqueName: \"kubernetes.io/projected/5d9368ad-8939-4e65-9184-9d221916bf56-kube-api-access-xsqfk\") pod \"openshift-state-metrics-566fddb674-h48nm\" (UID: \"5d9368ad-8939-4e65-9184-9d221916bf56\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-h48nm" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.415409 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-2m5bf" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.640435 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/kube-state-metrics-777cb5bd5d-2m5bf"] Feb 16 11:13:56 crc kubenswrapper[4949]: W0216 11:13:56.644726 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod01617150_585e_4645_88a1_6edd24398621.slice/crio-3fef3bf14930b34b778da40020a6e386e5094f954673610dd8aafb2d05573217 WatchSource:0}: Error finding container 3fef3bf14930b34b778da40020a6e386e5094f954673610dd8aafb2d05573217: Status 404 returned error can't find the container with id 3fef3bf14930b34b778da40020a6e386e5094f954673610dd8aafb2d05573217 Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.757121 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-zfqtx" event={"ID":"fb73ccdd-72b5-47e0-bb99-9147c64e783c","Type":"ContainerStarted","Data":"5e1be84eaca9b3a556d794b73db47374d5dba72f0f651db06b38ffb9cc4cc330"} Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.760544 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-2m5bf" event={"ID":"01617150-585e-4645-88a1-6edd24398621","Type":"ContainerStarted","Data":"3fef3bf14930b34b778da40020a6e386e5094f954673610dd8aafb2d05573217"} Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.854886 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/5d9368ad-8939-4e65-9184-9d221916bf56-openshift-state-metrics-tls\") pod \"openshift-state-metrics-566fddb674-h48nm\" (UID: \"5d9368ad-8939-4e65-9184-9d221916bf56\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-h48nm" Feb 16 11:13:56 crc kubenswrapper[4949]: I0216 11:13:56.863167 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/5d9368ad-8939-4e65-9184-9d221916bf56-openshift-state-metrics-tls\") pod \"openshift-state-metrics-566fddb674-h48nm\" (UID: \"5d9368ad-8939-4e65-9184-9d221916bf56\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-h48nm" Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.001677 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/openshift-state-metrics-566fddb674-h48nm" Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.234248 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/alertmanager-main-0"] Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.238684 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/alertmanager-main-0" Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.244124 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-kube-rbac-proxy" Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.244426 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-web-config" Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.244674 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-dockercfg-kppgm" Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.244945 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-kube-rbac-proxy-metric" Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.245095 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-generated" Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.245106 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-kube-rbac-proxy-web" Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.249078 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-tls-assets-0" Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.249837 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-tls" Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.256115 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"alertmanager-trusted-ca-bundle" Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.269435 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d09a8d4-1225-4b5a-94ee-149b7c0fe01e" path="/var/lib/kubelet/pods/1d09a8d4-1225-4b5a-94ee-149b7c0fe01e/volumes" Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.270305 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/alertmanager-main-0"] Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.281697 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/openshift-state-metrics-566fddb674-h48nm"] Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.408407 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"alertmanager-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/efcb1701-3963-48c2-894a-c754339e78b0-alertmanager-trusted-ca-bundle\") pod \"alertmanager-main-0\" (UID: \"efcb1701-3963-48c2-894a-c754339e78b0\") " pod="openshift-monitoring/alertmanager-main-0" Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.408691 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-alertmanager-main-tls\" (UniqueName: \"kubernetes.io/secret/efcb1701-3963-48c2-894a-c754339e78b0-secret-alertmanager-main-tls\") pod \"alertmanager-main-0\" (UID: \"efcb1701-3963-48c2-894a-c754339e78b0\") " pod="openshift-monitoring/alertmanager-main-0" Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.408726 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"alertmanager-main-db\" (UniqueName: \"kubernetes.io/empty-dir/efcb1701-3963-48c2-894a-c754339e78b0-alertmanager-main-db\") pod \"alertmanager-main-0\" (UID: 
\"efcb1701-3963-48c2-894a-c754339e78b0\") " pod="openshift-monitoring/alertmanager-main-0" Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.408748 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/efcb1701-3963-48c2-894a-c754339e78b0-web-config\") pod \"alertmanager-main-0\" (UID: \"efcb1701-3963-48c2-894a-c754339e78b0\") " pod="openshift-monitoring/alertmanager-main-0" Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.408773 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-alertmanager-kube-rbac-proxy-metric\" (UniqueName: \"kubernetes.io/secret/efcb1701-3963-48c2-894a-c754339e78b0-secret-alertmanager-kube-rbac-proxy-metric\") pod \"alertmanager-main-0\" (UID: \"efcb1701-3963-48c2-894a-c754339e78b0\") " pod="openshift-monitoring/alertmanager-main-0" Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.408836 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-alertmanager-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/efcb1701-3963-48c2-894a-c754339e78b0-secret-alertmanager-kube-rbac-proxy-web\") pod \"alertmanager-main-0\" (UID: \"efcb1701-3963-48c2-894a-c754339e78b0\") " pod="openshift-monitoring/alertmanager-main-0" Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.408898 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/efcb1701-3963-48c2-894a-c754339e78b0-config-out\") pod \"alertmanager-main-0\" (UID: \"efcb1701-3963-48c2-894a-c754339e78b0\") " pod="openshift-monitoring/alertmanager-main-0" Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.408928 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jvxn6\" (UniqueName: \"kubernetes.io/projected/efcb1701-3963-48c2-894a-c754339e78b0-kube-api-access-jvxn6\") pod \"alertmanager-main-0\" (UID: \"efcb1701-3963-48c2-894a-c754339e78b0\") " pod="openshift-monitoring/alertmanager-main-0" Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.408954 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/efcb1701-3963-48c2-894a-c754339e78b0-config-volume\") pod \"alertmanager-main-0\" (UID: \"efcb1701-3963-48c2-894a-c754339e78b0\") " pod="openshift-monitoring/alertmanager-main-0" Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.408979 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/efcb1701-3963-48c2-894a-c754339e78b0-metrics-client-ca\") pod \"alertmanager-main-0\" (UID: \"efcb1701-3963-48c2-894a-c754339e78b0\") " pod="openshift-monitoring/alertmanager-main-0" Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.409015 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-alertmanager-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/efcb1701-3963-48c2-894a-c754339e78b0-secret-alertmanager-kube-rbac-proxy\") pod \"alertmanager-main-0\" (UID: \"efcb1701-3963-48c2-894a-c754339e78b0\") " pod="openshift-monitoring/alertmanager-main-0" Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.409089 4949 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/efcb1701-3963-48c2-894a-c754339e78b0-tls-assets\") pod \"alertmanager-main-0\" (UID: \"efcb1701-3963-48c2-894a-c754339e78b0\") " pod="openshift-monitoring/alertmanager-main-0" Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.510636 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"alertmanager-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/efcb1701-3963-48c2-894a-c754339e78b0-alertmanager-trusted-ca-bundle\") pod \"alertmanager-main-0\" (UID: \"efcb1701-3963-48c2-894a-c754339e78b0\") " pod="openshift-monitoring/alertmanager-main-0" Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.510703 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-alertmanager-main-tls\" (UniqueName: \"kubernetes.io/secret/efcb1701-3963-48c2-894a-c754339e78b0-secret-alertmanager-main-tls\") pod \"alertmanager-main-0\" (UID: \"efcb1701-3963-48c2-894a-c754339e78b0\") " pod="openshift-monitoring/alertmanager-main-0" Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.510741 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"alertmanager-main-db\" (UniqueName: \"kubernetes.io/empty-dir/efcb1701-3963-48c2-894a-c754339e78b0-alertmanager-main-db\") pod \"alertmanager-main-0\" (UID: \"efcb1701-3963-48c2-894a-c754339e78b0\") " pod="openshift-monitoring/alertmanager-main-0" Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.510770 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/efcb1701-3963-48c2-894a-c754339e78b0-web-config\") pod \"alertmanager-main-0\" (UID: \"efcb1701-3963-48c2-894a-c754339e78b0\") " pod="openshift-monitoring/alertmanager-main-0" Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.510824 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-alertmanager-kube-rbac-proxy-metric\" (UniqueName: \"kubernetes.io/secret/efcb1701-3963-48c2-894a-c754339e78b0-secret-alertmanager-kube-rbac-proxy-metric\") pod \"alertmanager-main-0\" (UID: \"efcb1701-3963-48c2-894a-c754339e78b0\") " pod="openshift-monitoring/alertmanager-main-0" Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.510846 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-alertmanager-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/efcb1701-3963-48c2-894a-c754339e78b0-secret-alertmanager-kube-rbac-proxy-web\") pod \"alertmanager-main-0\" (UID: \"efcb1701-3963-48c2-894a-c754339e78b0\") " pod="openshift-monitoring/alertmanager-main-0" Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.510867 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/efcb1701-3963-48c2-894a-c754339e78b0-config-out\") pod \"alertmanager-main-0\" (UID: \"efcb1701-3963-48c2-894a-c754339e78b0\") " pod="openshift-monitoring/alertmanager-main-0" Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.510886 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jvxn6\" (UniqueName: \"kubernetes.io/projected/efcb1701-3963-48c2-894a-c754339e78b0-kube-api-access-jvxn6\") pod \"alertmanager-main-0\" (UID: \"efcb1701-3963-48c2-894a-c754339e78b0\") " pod="openshift-monitoring/alertmanager-main-0" Feb 16 11:13:57 crc 
kubenswrapper[4949]: I0216 11:13:57.510905 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/efcb1701-3963-48c2-894a-c754339e78b0-config-volume\") pod \"alertmanager-main-0\" (UID: \"efcb1701-3963-48c2-894a-c754339e78b0\") " pod="openshift-monitoring/alertmanager-main-0" Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.510924 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/efcb1701-3963-48c2-894a-c754339e78b0-metrics-client-ca\") pod \"alertmanager-main-0\" (UID: \"efcb1701-3963-48c2-894a-c754339e78b0\") " pod="openshift-monitoring/alertmanager-main-0" Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.510949 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-alertmanager-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/efcb1701-3963-48c2-894a-c754339e78b0-secret-alertmanager-kube-rbac-proxy\") pod \"alertmanager-main-0\" (UID: \"efcb1701-3963-48c2-894a-c754339e78b0\") " pod="openshift-monitoring/alertmanager-main-0" Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.510972 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/efcb1701-3963-48c2-894a-c754339e78b0-tls-assets\") pod \"alertmanager-main-0\" (UID: \"efcb1701-3963-48c2-894a-c754339e78b0\") " pod="openshift-monitoring/alertmanager-main-0" Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.512552 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"alertmanager-main-db\" (UniqueName: \"kubernetes.io/empty-dir/efcb1701-3963-48c2-894a-c754339e78b0-alertmanager-main-db\") pod \"alertmanager-main-0\" (UID: \"efcb1701-3963-48c2-894a-c754339e78b0\") " pod="openshift-monitoring/alertmanager-main-0" Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.513501 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"alertmanager-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/efcb1701-3963-48c2-894a-c754339e78b0-alertmanager-trusted-ca-bundle\") pod \"alertmanager-main-0\" (UID: \"efcb1701-3963-48c2-894a-c754339e78b0\") " pod="openshift-monitoring/alertmanager-main-0" Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.514218 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/efcb1701-3963-48c2-894a-c754339e78b0-metrics-client-ca\") pod \"alertmanager-main-0\" (UID: \"efcb1701-3963-48c2-894a-c754339e78b0\") " pod="openshift-monitoring/alertmanager-main-0" Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.516262 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/efcb1701-3963-48c2-894a-c754339e78b0-config-volume\") pod \"alertmanager-main-0\" (UID: \"efcb1701-3963-48c2-894a-c754339e78b0\") " pod="openshift-monitoring/alertmanager-main-0" Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.519668 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/efcb1701-3963-48c2-894a-c754339e78b0-tls-assets\") pod \"alertmanager-main-0\" (UID: \"efcb1701-3963-48c2-894a-c754339e78b0\") " pod="openshift-monitoring/alertmanager-main-0" Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.519928 4949 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"secret-alertmanager-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/efcb1701-3963-48c2-894a-c754339e78b0-secret-alertmanager-kube-rbac-proxy\") pod \"alertmanager-main-0\" (UID: \"efcb1701-3963-48c2-894a-c754339e78b0\") " pod="openshift-monitoring/alertmanager-main-0" Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.528584 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/efcb1701-3963-48c2-894a-c754339e78b0-config-out\") pod \"alertmanager-main-0\" (UID: \"efcb1701-3963-48c2-894a-c754339e78b0\") " pod="openshift-monitoring/alertmanager-main-0" Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.528836 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-alertmanager-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/efcb1701-3963-48c2-894a-c754339e78b0-secret-alertmanager-kube-rbac-proxy-web\") pod \"alertmanager-main-0\" (UID: \"efcb1701-3963-48c2-894a-c754339e78b0\") " pod="openshift-monitoring/alertmanager-main-0" Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.529124 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-alertmanager-kube-rbac-proxy-metric\" (UniqueName: \"kubernetes.io/secret/efcb1701-3963-48c2-894a-c754339e78b0-secret-alertmanager-kube-rbac-proxy-metric\") pod \"alertmanager-main-0\" (UID: \"efcb1701-3963-48c2-894a-c754339e78b0\") " pod="openshift-monitoring/alertmanager-main-0" Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.534011 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/efcb1701-3963-48c2-894a-c754339e78b0-web-config\") pod \"alertmanager-main-0\" (UID: \"efcb1701-3963-48c2-894a-c754339e78b0\") " pod="openshift-monitoring/alertmanager-main-0" Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.535355 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jvxn6\" (UniqueName: \"kubernetes.io/projected/efcb1701-3963-48c2-894a-c754339e78b0-kube-api-access-jvxn6\") pod \"alertmanager-main-0\" (UID: \"efcb1701-3963-48c2-894a-c754339e78b0\") " pod="openshift-monitoring/alertmanager-main-0" Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.539083 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-alertmanager-main-tls\" (UniqueName: \"kubernetes.io/secret/efcb1701-3963-48c2-894a-c754339e78b0-secret-alertmanager-main-tls\") pod \"alertmanager-main-0\" (UID: \"efcb1701-3963-48c2-894a-c754339e78b0\") " pod="openshift-monitoring/alertmanager-main-0" Feb 16 11:13:57 crc kubenswrapper[4949]: I0216 11:13:57.587469 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/alertmanager-main-0" Feb 16 11:13:58 crc kubenswrapper[4949]: I0216 11:13:57.768051 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/openshift-state-metrics-566fddb674-h48nm" event={"ID":"5d9368ad-8939-4e65-9184-9d221916bf56","Type":"ContainerStarted","Data":"2236daf23a83eec110d6f8101d599ff8bd1190aefffe7e84874dd5560abbfd01"} Feb 16 11:13:58 crc kubenswrapper[4949]: I0216 11:13:57.769324 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/openshift-state-metrics-566fddb674-h48nm" event={"ID":"5d9368ad-8939-4e65-9184-9d221916bf56","Type":"ContainerStarted","Data":"751ae25f72cc16c4b52891de3bf070c7e665603ac1f5229b8754dae62a2198dc"} Feb 16 11:13:58 crc kubenswrapper[4949]: I0216 11:13:57.769370 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/openshift-state-metrics-566fddb674-h48nm" event={"ID":"5d9368ad-8939-4e65-9184-9d221916bf56","Type":"ContainerStarted","Data":"d4bb43d81d4c7913dc3e8af4f0715b4eb798f9eb9faa78bc5c1b7eb7615ab611"} Feb 16 11:13:58 crc kubenswrapper[4949]: I0216 11:13:58.142539 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/thanos-querier-6b868984d8-gd5qc"] Feb 16 11:13:58 crc kubenswrapper[4949]: I0216 11:13:58.151111 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/thanos-querier-6b868984d8-gd5qc" Feb 16 11:13:58 crc kubenswrapper[4949]: I0216 11:13:58.154495 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy-rules" Feb 16 11:13:58 crc kubenswrapper[4949]: I0216 11:13:58.154642 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy-metrics" Feb 16 11:13:58 crc kubenswrapper[4949]: I0216 11:13:58.154903 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy" Feb 16 11:13:58 crc kubenswrapper[4949]: I0216 11:13:58.154996 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-dockercfg-kgg2c" Feb 16 11:13:58 crc kubenswrapper[4949]: I0216 11:13:58.155080 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-tls" Feb 16 11:13:58 crc kubenswrapper[4949]: I0216 11:13:58.155408 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-grpc-tls-9lrtn44d20qih" Feb 16 11:13:58 crc kubenswrapper[4949]: I0216 11:13:58.160040 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy-web" Feb 16 11:13:58 crc kubenswrapper[4949]: I0216 11:13:58.191421 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/thanos-querier-6b868984d8-gd5qc"] Feb 16 11:13:58 crc kubenswrapper[4949]: I0216 11:13:58.220571 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-rules\" (UniqueName: \"kubernetes.io/secret/e637f49b-86a6-4961-bdac-3d5d751aa75e-secret-thanos-querier-kube-rbac-proxy-rules\") pod \"thanos-querier-6b868984d8-gd5qc\" (UID: \"e637f49b-86a6-4961-bdac-3d5d751aa75e\") " pod="openshift-monitoring/thanos-querier-6b868984d8-gd5qc" Feb 16 11:13:58 crc kubenswrapper[4949]: I0216 11:13:58.220608 4949 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pbskb\" (UniqueName: \"kubernetes.io/projected/e637f49b-86a6-4961-bdac-3d5d751aa75e-kube-api-access-pbskb\") pod \"thanos-querier-6b868984d8-gd5qc\" (UID: \"e637f49b-86a6-4961-bdac-3d5d751aa75e\") " pod="openshift-monitoring/thanos-querier-6b868984d8-gd5qc" Feb 16 11:13:58 crc kubenswrapper[4949]: I0216 11:13:58.220627 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/e637f49b-86a6-4961-bdac-3d5d751aa75e-metrics-client-ca\") pod \"thanos-querier-6b868984d8-gd5qc\" (UID: \"e637f49b-86a6-4961-bdac-3d5d751aa75e\") " pod="openshift-monitoring/thanos-querier-6b868984d8-gd5qc" Feb 16 11:13:58 crc kubenswrapper[4949]: I0216 11:13:58.220650 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/e637f49b-86a6-4961-bdac-3d5d751aa75e-secret-grpc-tls\") pod \"thanos-querier-6b868984d8-gd5qc\" (UID: \"e637f49b-86a6-4961-bdac-3d5d751aa75e\") " pod="openshift-monitoring/thanos-querier-6b868984d8-gd5qc" Feb 16 11:13:58 crc kubenswrapper[4949]: I0216 11:13:58.220683 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-metrics\" (UniqueName: \"kubernetes.io/secret/e637f49b-86a6-4961-bdac-3d5d751aa75e-secret-thanos-querier-kube-rbac-proxy-metrics\") pod \"thanos-querier-6b868984d8-gd5qc\" (UID: \"e637f49b-86a6-4961-bdac-3d5d751aa75e\") " pod="openshift-monitoring/thanos-querier-6b868984d8-gd5qc" Feb 16 11:13:58 crc kubenswrapper[4949]: I0216 11:13:58.220704 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/e637f49b-86a6-4961-bdac-3d5d751aa75e-secret-thanos-querier-kube-rbac-proxy\") pod \"thanos-querier-6b868984d8-gd5qc\" (UID: \"e637f49b-86a6-4961-bdac-3d5d751aa75e\") " pod="openshift-monitoring/thanos-querier-6b868984d8-gd5qc" Feb 16 11:13:58 crc kubenswrapper[4949]: I0216 11:13:58.220720 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/e637f49b-86a6-4961-bdac-3d5d751aa75e-secret-thanos-querier-kube-rbac-proxy-web\") pod \"thanos-querier-6b868984d8-gd5qc\" (UID: \"e637f49b-86a6-4961-bdac-3d5d751aa75e\") " pod="openshift-monitoring/thanos-querier-6b868984d8-gd5qc" Feb 16 11:13:58 crc kubenswrapper[4949]: I0216 11:13:58.220752 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-tls\" (UniqueName: \"kubernetes.io/secret/e637f49b-86a6-4961-bdac-3d5d751aa75e-secret-thanos-querier-tls\") pod \"thanos-querier-6b868984d8-gd5qc\" (UID: \"e637f49b-86a6-4961-bdac-3d5d751aa75e\") " pod="openshift-monitoring/thanos-querier-6b868984d8-gd5qc" Feb 16 11:13:58 crc kubenswrapper[4949]: I0216 11:13:58.322996 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-metrics\" (UniqueName: \"kubernetes.io/secret/e637f49b-86a6-4961-bdac-3d5d751aa75e-secret-thanos-querier-kube-rbac-proxy-metrics\") pod \"thanos-querier-6b868984d8-gd5qc\" (UID: \"e637f49b-86a6-4961-bdac-3d5d751aa75e\") " pod="openshift-monitoring/thanos-querier-6b868984d8-gd5qc" Feb 16 11:13:58 
crc kubenswrapper[4949]: I0216 11:13:58.323059 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/e637f49b-86a6-4961-bdac-3d5d751aa75e-secret-thanos-querier-kube-rbac-proxy\") pod \"thanos-querier-6b868984d8-gd5qc\" (UID: \"e637f49b-86a6-4961-bdac-3d5d751aa75e\") " pod="openshift-monitoring/thanos-querier-6b868984d8-gd5qc" Feb 16 11:13:58 crc kubenswrapper[4949]: I0216 11:13:58.323086 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/e637f49b-86a6-4961-bdac-3d5d751aa75e-secret-thanos-querier-kube-rbac-proxy-web\") pod \"thanos-querier-6b868984d8-gd5qc\" (UID: \"e637f49b-86a6-4961-bdac-3d5d751aa75e\") " pod="openshift-monitoring/thanos-querier-6b868984d8-gd5qc" Feb 16 11:13:58 crc kubenswrapper[4949]: I0216 11:13:58.323123 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-tls\" (UniqueName: \"kubernetes.io/secret/e637f49b-86a6-4961-bdac-3d5d751aa75e-secret-thanos-querier-tls\") pod \"thanos-querier-6b868984d8-gd5qc\" (UID: \"e637f49b-86a6-4961-bdac-3d5d751aa75e\") " pod="openshift-monitoring/thanos-querier-6b868984d8-gd5qc" Feb 16 11:13:58 crc kubenswrapper[4949]: I0216 11:13:58.323386 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-rules\" (UniqueName: \"kubernetes.io/secret/e637f49b-86a6-4961-bdac-3d5d751aa75e-secret-thanos-querier-kube-rbac-proxy-rules\") pod \"thanos-querier-6b868984d8-gd5qc\" (UID: \"e637f49b-86a6-4961-bdac-3d5d751aa75e\") " pod="openshift-monitoring/thanos-querier-6b868984d8-gd5qc" Feb 16 11:13:58 crc kubenswrapper[4949]: I0216 11:13:58.323406 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pbskb\" (UniqueName: \"kubernetes.io/projected/e637f49b-86a6-4961-bdac-3d5d751aa75e-kube-api-access-pbskb\") pod \"thanos-querier-6b868984d8-gd5qc\" (UID: \"e637f49b-86a6-4961-bdac-3d5d751aa75e\") " pod="openshift-monitoring/thanos-querier-6b868984d8-gd5qc" Feb 16 11:13:58 crc kubenswrapper[4949]: I0216 11:13:58.323429 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/e637f49b-86a6-4961-bdac-3d5d751aa75e-metrics-client-ca\") pod \"thanos-querier-6b868984d8-gd5qc\" (UID: \"e637f49b-86a6-4961-bdac-3d5d751aa75e\") " pod="openshift-monitoring/thanos-querier-6b868984d8-gd5qc" Feb 16 11:13:58 crc kubenswrapper[4949]: I0216 11:13:58.323458 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/e637f49b-86a6-4961-bdac-3d5d751aa75e-secret-grpc-tls\") pod \"thanos-querier-6b868984d8-gd5qc\" (UID: \"e637f49b-86a6-4961-bdac-3d5d751aa75e\") " pod="openshift-monitoring/thanos-querier-6b868984d8-gd5qc" Feb 16 11:13:58 crc kubenswrapper[4949]: I0216 11:13:58.325545 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/e637f49b-86a6-4961-bdac-3d5d751aa75e-metrics-client-ca\") pod \"thanos-querier-6b868984d8-gd5qc\" (UID: \"e637f49b-86a6-4961-bdac-3d5d751aa75e\") " pod="openshift-monitoring/thanos-querier-6b868984d8-gd5qc" Feb 16 11:13:58 crc kubenswrapper[4949]: I0216 11:13:58.332676 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"secret-thanos-querier-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/e637f49b-86a6-4961-bdac-3d5d751aa75e-secret-thanos-querier-kube-rbac-proxy\") pod \"thanos-querier-6b868984d8-gd5qc\" (UID: \"e637f49b-86a6-4961-bdac-3d5d751aa75e\") " pod="openshift-monitoring/thanos-querier-6b868984d8-gd5qc" Feb 16 11:13:58 crc kubenswrapper[4949]: I0216 11:13:58.332853 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-kube-rbac-proxy-rules\" (UniqueName: \"kubernetes.io/secret/e637f49b-86a6-4961-bdac-3d5d751aa75e-secret-thanos-querier-kube-rbac-proxy-rules\") pod \"thanos-querier-6b868984d8-gd5qc\" (UID: \"e637f49b-86a6-4961-bdac-3d5d751aa75e\") " pod="openshift-monitoring/thanos-querier-6b868984d8-gd5qc" Feb 16 11:13:58 crc kubenswrapper[4949]: I0216 11:13:58.333129 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-tls\" (UniqueName: \"kubernetes.io/secret/e637f49b-86a6-4961-bdac-3d5d751aa75e-secret-thanos-querier-tls\") pod \"thanos-querier-6b868984d8-gd5qc\" (UID: \"e637f49b-86a6-4961-bdac-3d5d751aa75e\") " pod="openshift-monitoring/thanos-querier-6b868984d8-gd5qc" Feb 16 11:13:58 crc kubenswrapper[4949]: I0216 11:13:58.335726 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/e637f49b-86a6-4961-bdac-3d5d751aa75e-secret-grpc-tls\") pod \"thanos-querier-6b868984d8-gd5qc\" (UID: \"e637f49b-86a6-4961-bdac-3d5d751aa75e\") " pod="openshift-monitoring/thanos-querier-6b868984d8-gd5qc" Feb 16 11:13:58 crc kubenswrapper[4949]: I0216 11:13:58.337107 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/e637f49b-86a6-4961-bdac-3d5d751aa75e-secret-thanos-querier-kube-rbac-proxy-web\") pod \"thanos-querier-6b868984d8-gd5qc\" (UID: \"e637f49b-86a6-4961-bdac-3d5d751aa75e\") " pod="openshift-monitoring/thanos-querier-6b868984d8-gd5qc" Feb 16 11:13:58 crc kubenswrapper[4949]: I0216 11:13:58.340701 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-kube-rbac-proxy-metrics\" (UniqueName: \"kubernetes.io/secret/e637f49b-86a6-4961-bdac-3d5d751aa75e-secret-thanos-querier-kube-rbac-proxy-metrics\") pod \"thanos-querier-6b868984d8-gd5qc\" (UID: \"e637f49b-86a6-4961-bdac-3d5d751aa75e\") " pod="openshift-monitoring/thanos-querier-6b868984d8-gd5qc" Feb 16 11:13:58 crc kubenswrapper[4949]: I0216 11:13:58.346492 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pbskb\" (UniqueName: \"kubernetes.io/projected/e637f49b-86a6-4961-bdac-3d5d751aa75e-kube-api-access-pbskb\") pod \"thanos-querier-6b868984d8-gd5qc\" (UID: \"e637f49b-86a6-4961-bdac-3d5d751aa75e\") " pod="openshift-monitoring/thanos-querier-6b868984d8-gd5qc" Feb 16 11:13:58 crc kubenswrapper[4949]: I0216 11:13:58.511296 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/thanos-querier-6b868984d8-gd5qc" Feb 16 11:13:58 crc kubenswrapper[4949]: I0216 11:13:58.730727 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/alertmanager-main-0"] Feb 16 11:13:58 crc kubenswrapper[4949]: I0216 11:13:58.784748 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"efcb1701-3963-48c2-894a-c754339e78b0","Type":"ContainerStarted","Data":"b0f3da9e3b6257f1e6b51aeb351e39b968e8dddaa7465289324ac3556fea5787"} Feb 16 11:13:58 crc kubenswrapper[4949]: I0216 11:13:58.938123 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/thanos-querier-6b868984d8-gd5qc"] Feb 16 11:13:58 crc kubenswrapper[4949]: W0216 11:13:58.945010 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode637f49b_86a6_4961_bdac_3d5d751aa75e.slice/crio-309fae40274f2c3071a783896b43e0a67636e7a51658ec03bd59cdb870818258 WatchSource:0}: Error finding container 309fae40274f2c3071a783896b43e0a67636e7a51658ec03bd59cdb870818258: Status 404 returned error can't find the container with id 309fae40274f2c3071a783896b43e0a67636e7a51658ec03bd59cdb870818258 Feb 16 11:13:59 crc kubenswrapper[4949]: I0216 11:13:59.792438 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-6b868984d8-gd5qc" event={"ID":"e637f49b-86a6-4961-bdac-3d5d751aa75e","Type":"ContainerStarted","Data":"309fae40274f2c3071a783896b43e0a67636e7a51658ec03bd59cdb870818258"} Feb 16 11:13:59 crc kubenswrapper[4949]: I0216 11:13:59.795666 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-2m5bf" event={"ID":"01617150-585e-4645-88a1-6edd24398621","Type":"ContainerStarted","Data":"df2515cf1e6c917013dd0276c047927b29f5a8e9d9b9c20034ab4e52fb49af9b"} Feb 16 11:13:59 crc kubenswrapper[4949]: I0216 11:13:59.795696 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-2m5bf" event={"ID":"01617150-585e-4645-88a1-6edd24398621","Type":"ContainerStarted","Data":"b58a13fd3793643929ed73586b2446de71d087e9d5541f70df95b5d4a928842d"} Feb 16 11:13:59 crc kubenswrapper[4949]: I0216 11:13:59.795711 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-2m5bf" event={"ID":"01617150-585e-4645-88a1-6edd24398621","Type":"ContainerStarted","Data":"8612e8ae059406ceb855582d9a488511a21f9ed671358812680ceec0d9efc5e3"} Feb 16 11:13:59 crc kubenswrapper[4949]: I0216 11:13:59.797220 4949 generic.go:334] "Generic (PLEG): container finished" podID="fb73ccdd-72b5-47e0-bb99-9147c64e783c" containerID="847f5d7193884b87b0052fd7ea5966bdeff8482453d4e98870fc0f4df3797720" exitCode=0 Feb 16 11:13:59 crc kubenswrapper[4949]: I0216 11:13:59.797294 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-zfqtx" event={"ID":"fb73ccdd-72b5-47e0-bb99-9147c64e783c","Type":"ContainerDied","Data":"847f5d7193884b87b0052fd7ea5966bdeff8482453d4e98870fc0f4df3797720"} Feb 16 11:13:59 crc kubenswrapper[4949]: I0216 11:13:59.820036 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-2m5bf" podStartSLOduration=1.7592223470000001 podStartE2EDuration="3.820004359s" podCreationTimestamp="2026-02-16 11:13:56 +0000 UTC" firstStartedPulling="2026-02-16 
11:13:56.648331245 +0000 UTC m=+426.277665410" lastFinishedPulling="2026-02-16 11:13:58.709113257 +0000 UTC m=+428.338447422" observedRunningTime="2026-02-16 11:13:59.815051307 +0000 UTC m=+429.444385472" watchObservedRunningTime="2026-02-16 11:13:59.820004359 +0000 UTC m=+429.449338544" Feb 16 11:14:00 crc kubenswrapper[4949]: I0216 11:14:00.811953 4949 generic.go:334] "Generic (PLEG): container finished" podID="efcb1701-3963-48c2-894a-c754339e78b0" containerID="9a09a823877befb0ddb24081cbbf91a9857b96746eb8f8b62055c3eb83ed9a6e" exitCode=0 Feb 16 11:14:00 crc kubenswrapper[4949]: I0216 11:14:00.812142 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"efcb1701-3963-48c2-894a-c754339e78b0","Type":"ContainerDied","Data":"9a09a823877befb0ddb24081cbbf91a9857b96746eb8f8b62055c3eb83ed9a6e"} Feb 16 11:14:00 crc kubenswrapper[4949]: I0216 11:14:00.814951 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-7d46cdf4c7-mbnnn"] Feb 16 11:14:00 crc kubenswrapper[4949]: I0216 11:14:00.818054 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-zfqtx" event={"ID":"fb73ccdd-72b5-47e0-bb99-9147c64e783c","Type":"ContainerStarted","Data":"d178223430c49215db1e0f626ab4df8e6342debbdcd61c6e4d0e3950ccaf69f1"} Feb 16 11:14:00 crc kubenswrapper[4949]: I0216 11:14:00.818102 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-zfqtx" event={"ID":"fb73ccdd-72b5-47e0-bb99-9147c64e783c","Type":"ContainerStarted","Data":"20ddddff89f0db29d378fce4d33e87f7d961cdb2759cc85d3bab28511d19165f"} Feb 16 11:14:00 crc kubenswrapper[4949]: I0216 11:14:00.818214 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-7d46cdf4c7-mbnnn" Feb 16 11:14:00 crc kubenswrapper[4949]: I0216 11:14:00.833055 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-7d46cdf4c7-mbnnn"] Feb 16 11:14:00 crc kubenswrapper[4949]: I0216 11:14:00.841024 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/openshift-state-metrics-566fddb674-h48nm" event={"ID":"5d9368ad-8939-4e65-9184-9d221916bf56","Type":"ContainerStarted","Data":"90c5bdc63cbeb45408c46ceb0906322ee0d00ef111cf29fedd3c26872fad751f"} Feb 16 11:14:00 crc kubenswrapper[4949]: I0216 11:14:00.948073 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/node-exporter-zfqtx" podStartSLOduration=2.634282936 podStartE2EDuration="4.94804086s" podCreationTimestamp="2026-02-16 11:13:56 +0000 UTC" firstStartedPulling="2026-02-16 11:13:56.390705829 +0000 UTC m=+426.020039994" lastFinishedPulling="2026-02-16 11:13:58.704463753 +0000 UTC m=+428.333797918" observedRunningTime="2026-02-16 11:14:00.945863062 +0000 UTC m=+430.575197227" watchObservedRunningTime="2026-02-16 11:14:00.94804086 +0000 UTC m=+430.577375025" Feb 16 11:14:00 crc kubenswrapper[4949]: I0216 11:14:00.950001 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/openshift-state-metrics-566fddb674-h48nm" podStartSLOduration=2.9141013559999998 podStartE2EDuration="4.949991242s" podCreationTimestamp="2026-02-16 11:13:56 +0000 UTC" firstStartedPulling="2026-02-16 11:13:57.673429195 +0000 UTC m=+427.302763360" lastFinishedPulling="2026-02-16 11:13:59.709319041 +0000 UTC m=+429.338653246" observedRunningTime="2026-02-16 11:14:00.925712373 +0000 UTC m=+430.555046538" 
watchObservedRunningTime="2026-02-16 11:14:00.949991242 +0000 UTC m=+430.579325407" Feb 16 11:14:00 crc kubenswrapper[4949]: I0216 11:14:00.979756 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/5b145423-74cd-418e-9932-cff00aee5ac6-console-config\") pod \"console-7d46cdf4c7-mbnnn\" (UID: \"5b145423-74cd-418e-9932-cff00aee5ac6\") " pod="openshift-console/console-7d46cdf4c7-mbnnn" Feb 16 11:14:00 crc kubenswrapper[4949]: I0216 11:14:00.979836 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/5b145423-74cd-418e-9932-cff00aee5ac6-console-oauth-config\") pod \"console-7d46cdf4c7-mbnnn\" (UID: \"5b145423-74cd-418e-9932-cff00aee5ac6\") " pod="openshift-console/console-7d46cdf4c7-mbnnn" Feb 16 11:14:00 crc kubenswrapper[4949]: I0216 11:14:00.979906 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/5b145423-74cd-418e-9932-cff00aee5ac6-oauth-serving-cert\") pod \"console-7d46cdf4c7-mbnnn\" (UID: \"5b145423-74cd-418e-9932-cff00aee5ac6\") " pod="openshift-console/console-7d46cdf4c7-mbnnn" Feb 16 11:14:00 crc kubenswrapper[4949]: I0216 11:14:00.979998 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5b145423-74cd-418e-9932-cff00aee5ac6-trusted-ca-bundle\") pod \"console-7d46cdf4c7-mbnnn\" (UID: \"5b145423-74cd-418e-9932-cff00aee5ac6\") " pod="openshift-console/console-7d46cdf4c7-mbnnn" Feb 16 11:14:00 crc kubenswrapper[4949]: I0216 11:14:00.980053 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9qdjw\" (UniqueName: \"kubernetes.io/projected/5b145423-74cd-418e-9932-cff00aee5ac6-kube-api-access-9qdjw\") pod \"console-7d46cdf4c7-mbnnn\" (UID: \"5b145423-74cd-418e-9932-cff00aee5ac6\") " pod="openshift-console/console-7d46cdf4c7-mbnnn" Feb 16 11:14:00 crc kubenswrapper[4949]: I0216 11:14:00.980132 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/5b145423-74cd-418e-9932-cff00aee5ac6-service-ca\") pod \"console-7d46cdf4c7-mbnnn\" (UID: \"5b145423-74cd-418e-9932-cff00aee5ac6\") " pod="openshift-console/console-7d46cdf4c7-mbnnn" Feb 16 11:14:00 crc kubenswrapper[4949]: I0216 11:14:00.980275 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/5b145423-74cd-418e-9932-cff00aee5ac6-console-serving-cert\") pod \"console-7d46cdf4c7-mbnnn\" (UID: \"5b145423-74cd-418e-9932-cff00aee5ac6\") " pod="openshift-console/console-7d46cdf4c7-mbnnn" Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.081271 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/5b145423-74cd-418e-9932-cff00aee5ac6-console-serving-cert\") pod \"console-7d46cdf4c7-mbnnn\" (UID: \"5b145423-74cd-418e-9932-cff00aee5ac6\") " pod="openshift-console/console-7d46cdf4c7-mbnnn" Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.081379 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: 
\"kubernetes.io/configmap/5b145423-74cd-418e-9932-cff00aee5ac6-console-config\") pod \"console-7d46cdf4c7-mbnnn\" (UID: \"5b145423-74cd-418e-9932-cff00aee5ac6\") " pod="openshift-console/console-7d46cdf4c7-mbnnn" Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.081400 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/5b145423-74cd-418e-9932-cff00aee5ac6-console-oauth-config\") pod \"console-7d46cdf4c7-mbnnn\" (UID: \"5b145423-74cd-418e-9932-cff00aee5ac6\") " pod="openshift-console/console-7d46cdf4c7-mbnnn" Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.081435 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/5b145423-74cd-418e-9932-cff00aee5ac6-oauth-serving-cert\") pod \"console-7d46cdf4c7-mbnnn\" (UID: \"5b145423-74cd-418e-9932-cff00aee5ac6\") " pod="openshift-console/console-7d46cdf4c7-mbnnn" Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.081468 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5b145423-74cd-418e-9932-cff00aee5ac6-trusted-ca-bundle\") pod \"console-7d46cdf4c7-mbnnn\" (UID: \"5b145423-74cd-418e-9932-cff00aee5ac6\") " pod="openshift-console/console-7d46cdf4c7-mbnnn" Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.081497 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9qdjw\" (UniqueName: \"kubernetes.io/projected/5b145423-74cd-418e-9932-cff00aee5ac6-kube-api-access-9qdjw\") pod \"console-7d46cdf4c7-mbnnn\" (UID: \"5b145423-74cd-418e-9932-cff00aee5ac6\") " pod="openshift-console/console-7d46cdf4c7-mbnnn" Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.081525 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/5b145423-74cd-418e-9932-cff00aee5ac6-service-ca\") pod \"console-7d46cdf4c7-mbnnn\" (UID: \"5b145423-74cd-418e-9932-cff00aee5ac6\") " pod="openshift-console/console-7d46cdf4c7-mbnnn" Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.082464 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/5b145423-74cd-418e-9932-cff00aee5ac6-service-ca\") pod \"console-7d46cdf4c7-mbnnn\" (UID: \"5b145423-74cd-418e-9932-cff00aee5ac6\") " pod="openshift-console/console-7d46cdf4c7-mbnnn" Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.082730 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/5b145423-74cd-418e-9932-cff00aee5ac6-oauth-serving-cert\") pod \"console-7d46cdf4c7-mbnnn\" (UID: \"5b145423-74cd-418e-9932-cff00aee5ac6\") " pod="openshift-console/console-7d46cdf4c7-mbnnn" Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.083377 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/5b145423-74cd-418e-9932-cff00aee5ac6-console-config\") pod \"console-7d46cdf4c7-mbnnn\" (UID: \"5b145423-74cd-418e-9932-cff00aee5ac6\") " pod="openshift-console/console-7d46cdf4c7-mbnnn" Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.084652 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/5b145423-74cd-418e-9932-cff00aee5ac6-trusted-ca-bundle\") pod \"console-7d46cdf4c7-mbnnn\" (UID: \"5b145423-74cd-418e-9932-cff00aee5ac6\") " pod="openshift-console/console-7d46cdf4c7-mbnnn" Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.086117 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/5b145423-74cd-418e-9932-cff00aee5ac6-console-serving-cert\") pod \"console-7d46cdf4c7-mbnnn\" (UID: \"5b145423-74cd-418e-9932-cff00aee5ac6\") " pod="openshift-console/console-7d46cdf4c7-mbnnn" Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.088049 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/5b145423-74cd-418e-9932-cff00aee5ac6-console-oauth-config\") pod \"console-7d46cdf4c7-mbnnn\" (UID: \"5b145423-74cd-418e-9932-cff00aee5ac6\") " pod="openshift-console/console-7d46cdf4c7-mbnnn" Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.102889 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9qdjw\" (UniqueName: \"kubernetes.io/projected/5b145423-74cd-418e-9932-cff00aee5ac6-kube-api-access-9qdjw\") pod \"console-7d46cdf4c7-mbnnn\" (UID: \"5b145423-74cd-418e-9932-cff00aee5ac6\") " pod="openshift-console/console-7d46cdf4c7-mbnnn" Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.157646 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-7d46cdf4c7-mbnnn" Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.371343 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/metrics-server-5cf8fb7db7-v2cdn"] Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.372616 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/metrics-server-5cf8fb7db7-v2cdn" Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.375864 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"metrics-server-audit-profiles" Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.375873 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-server-tls" Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.376524 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"kubelet-serving-ca-bundle" Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.376573 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-server-dockercfg-7kw8l" Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.378225 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-server-aegomhlisrck1" Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.378224 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-client-certs" Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.385797 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-server-audit-profiles\" (UniqueName: \"kubernetes.io/configmap/8801355f-9f5d-427a-be0a-27e925d25a10-metrics-server-audit-profiles\") pod \"metrics-server-5cf8fb7db7-v2cdn\" (UID: \"8801355f-9f5d-427a-be0a-27e925d25a10\") " pod="openshift-monitoring/metrics-server-5cf8fb7db7-v2cdn" Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.385844 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ncz22\" (UniqueName: \"kubernetes.io/projected/8801355f-9f5d-427a-be0a-27e925d25a10-kube-api-access-ncz22\") pod \"metrics-server-5cf8fb7db7-v2cdn\" (UID: \"8801355f-9f5d-427a-be0a-27e925d25a10\") " pod="openshift-monitoring/metrics-server-5cf8fb7db7-v2cdn" Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.385880 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-metrics-server-tls\" (UniqueName: \"kubernetes.io/secret/8801355f-9f5d-427a-be0a-27e925d25a10-secret-metrics-server-tls\") pod \"metrics-server-5cf8fb7db7-v2cdn\" (UID: \"8801355f-9f5d-427a-be0a-27e925d25a10\") " pod="openshift-monitoring/metrics-server-5cf8fb7db7-v2cdn" Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.385923 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8801355f-9f5d-427a-be0a-27e925d25a10-configmap-kubelet-serving-ca-bundle\") pod \"metrics-server-5cf8fb7db7-v2cdn\" (UID: \"8801355f-9f5d-427a-be0a-27e925d25a10\") " pod="openshift-monitoring/metrics-server-5cf8fb7db7-v2cdn" Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.385942 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/8801355f-9f5d-427a-be0a-27e925d25a10-secret-metrics-client-certs\") pod \"metrics-server-5cf8fb7db7-v2cdn\" (UID: \"8801355f-9f5d-427a-be0a-27e925d25a10\") " pod="openshift-monitoring/metrics-server-5cf8fb7db7-v2cdn" Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.385991 4949 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8801355f-9f5d-427a-be0a-27e925d25a10-client-ca-bundle\") pod \"metrics-server-5cf8fb7db7-v2cdn\" (UID: \"8801355f-9f5d-427a-be0a-27e925d25a10\") " pod="openshift-monitoring/metrics-server-5cf8fb7db7-v2cdn" Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.386020 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-log\" (UniqueName: \"kubernetes.io/empty-dir/8801355f-9f5d-427a-be0a-27e925d25a10-audit-log\") pod \"metrics-server-5cf8fb7db7-v2cdn\" (UID: \"8801355f-9f5d-427a-be0a-27e925d25a10\") " pod="openshift-monitoring/metrics-server-5cf8fb7db7-v2cdn" Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.386201 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/metrics-server-5cf8fb7db7-v2cdn"] Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.486929 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ncz22\" (UniqueName: \"kubernetes.io/projected/8801355f-9f5d-427a-be0a-27e925d25a10-kube-api-access-ncz22\") pod \"metrics-server-5cf8fb7db7-v2cdn\" (UID: \"8801355f-9f5d-427a-be0a-27e925d25a10\") " pod="openshift-monitoring/metrics-server-5cf8fb7db7-v2cdn" Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.487016 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-metrics-server-tls\" (UniqueName: \"kubernetes.io/secret/8801355f-9f5d-427a-be0a-27e925d25a10-secret-metrics-server-tls\") pod \"metrics-server-5cf8fb7db7-v2cdn\" (UID: \"8801355f-9f5d-427a-be0a-27e925d25a10\") " pod="openshift-monitoring/metrics-server-5cf8fb7db7-v2cdn" Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.487079 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8801355f-9f5d-427a-be0a-27e925d25a10-configmap-kubelet-serving-ca-bundle\") pod \"metrics-server-5cf8fb7db7-v2cdn\" (UID: \"8801355f-9f5d-427a-be0a-27e925d25a10\") " pod="openshift-monitoring/metrics-server-5cf8fb7db7-v2cdn" Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.487102 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/8801355f-9f5d-427a-be0a-27e925d25a10-secret-metrics-client-certs\") pod \"metrics-server-5cf8fb7db7-v2cdn\" (UID: \"8801355f-9f5d-427a-be0a-27e925d25a10\") " pod="openshift-monitoring/metrics-server-5cf8fb7db7-v2cdn" Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.487139 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8801355f-9f5d-427a-be0a-27e925d25a10-client-ca-bundle\") pod \"metrics-server-5cf8fb7db7-v2cdn\" (UID: \"8801355f-9f5d-427a-be0a-27e925d25a10\") " pod="openshift-monitoring/metrics-server-5cf8fb7db7-v2cdn" Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.487193 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-log\" (UniqueName: \"kubernetes.io/empty-dir/8801355f-9f5d-427a-be0a-27e925d25a10-audit-log\") pod \"metrics-server-5cf8fb7db7-v2cdn\" (UID: \"8801355f-9f5d-427a-be0a-27e925d25a10\") " pod="openshift-monitoring/metrics-server-5cf8fb7db7-v2cdn" Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.487241 4949 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"metrics-server-audit-profiles\" (UniqueName: \"kubernetes.io/configmap/8801355f-9f5d-427a-be0a-27e925d25a10-metrics-server-audit-profiles\") pod \"metrics-server-5cf8fb7db7-v2cdn\" (UID: \"8801355f-9f5d-427a-be0a-27e925d25a10\") " pod="openshift-monitoring/metrics-server-5cf8fb7db7-v2cdn" Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.488829 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-server-audit-profiles\" (UniqueName: \"kubernetes.io/configmap/8801355f-9f5d-427a-be0a-27e925d25a10-metrics-server-audit-profiles\") pod \"metrics-server-5cf8fb7db7-v2cdn\" (UID: \"8801355f-9f5d-427a-be0a-27e925d25a10\") " pod="openshift-monitoring/metrics-server-5cf8fb7db7-v2cdn" Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.489213 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-log\" (UniqueName: \"kubernetes.io/empty-dir/8801355f-9f5d-427a-be0a-27e925d25a10-audit-log\") pod \"metrics-server-5cf8fb7db7-v2cdn\" (UID: \"8801355f-9f5d-427a-be0a-27e925d25a10\") " pod="openshift-monitoring/metrics-server-5cf8fb7db7-v2cdn" Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.489779 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8801355f-9f5d-427a-be0a-27e925d25a10-configmap-kubelet-serving-ca-bundle\") pod \"metrics-server-5cf8fb7db7-v2cdn\" (UID: \"8801355f-9f5d-427a-be0a-27e925d25a10\") " pod="openshift-monitoring/metrics-server-5cf8fb7db7-v2cdn" Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.492512 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/8801355f-9f5d-427a-be0a-27e925d25a10-secret-metrics-client-certs\") pod \"metrics-server-5cf8fb7db7-v2cdn\" (UID: \"8801355f-9f5d-427a-be0a-27e925d25a10\") " pod="openshift-monitoring/metrics-server-5cf8fb7db7-v2cdn" Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.492743 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-metrics-server-tls\" (UniqueName: \"kubernetes.io/secret/8801355f-9f5d-427a-be0a-27e925d25a10-secret-metrics-server-tls\") pod \"metrics-server-5cf8fb7db7-v2cdn\" (UID: \"8801355f-9f5d-427a-be0a-27e925d25a10\") " pod="openshift-monitoring/metrics-server-5cf8fb7db7-v2cdn" Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.494534 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8801355f-9f5d-427a-be0a-27e925d25a10-client-ca-bundle\") pod \"metrics-server-5cf8fb7db7-v2cdn\" (UID: \"8801355f-9f5d-427a-be0a-27e925d25a10\") " pod="openshift-monitoring/metrics-server-5cf8fb7db7-v2cdn" Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.504132 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ncz22\" (UniqueName: \"kubernetes.io/projected/8801355f-9f5d-427a-be0a-27e925d25a10-kube-api-access-ncz22\") pod \"metrics-server-5cf8fb7db7-v2cdn\" (UID: \"8801355f-9f5d-427a-be0a-27e925d25a10\") " pod="openshift-monitoring/metrics-server-5cf8fb7db7-v2cdn" Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.694343 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/metrics-server-5cf8fb7db7-v2cdn" Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.797725 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/monitoring-plugin-686d98f9fb-sftlw"] Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.799822 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/monitoring-plugin-686d98f9fb-sftlw" Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.802352 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"monitoring-plugin-cert" Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.802465 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"default-dockercfg-6tstp" Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.809067 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/monitoring-plugin-686d98f9fb-sftlw"] Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.866297 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-6b868984d8-gd5qc" event={"ID":"e637f49b-86a6-4961-bdac-3d5d751aa75e","Type":"ContainerStarted","Data":"3bf4cc0346dd58124699520fd7c9442be8fa08aeea6fed0025e49846f918a585"} Feb 16 11:14:01 crc kubenswrapper[4949]: I0216 11:14:01.997168 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"monitoring-plugin-cert\" (UniqueName: \"kubernetes.io/secret/ea73c187-a1cf-4bd0-b52a-0461302e62b5-monitoring-plugin-cert\") pod \"monitoring-plugin-686d98f9fb-sftlw\" (UID: \"ea73c187-a1cf-4bd0-b52a-0461302e62b5\") " pod="openshift-monitoring/monitoring-plugin-686d98f9fb-sftlw" Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.005601 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-7d46cdf4c7-mbnnn"] Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.099821 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"monitoring-plugin-cert\" (UniqueName: \"kubernetes.io/secret/ea73c187-a1cf-4bd0-b52a-0461302e62b5-monitoring-plugin-cert\") pod \"monitoring-plugin-686d98f9fb-sftlw\" (UID: \"ea73c187-a1cf-4bd0-b52a-0461302e62b5\") " pod="openshift-monitoring/monitoring-plugin-686d98f9fb-sftlw" Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.106401 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"monitoring-plugin-cert\" (UniqueName: \"kubernetes.io/secret/ea73c187-a1cf-4bd0-b52a-0461302e62b5-monitoring-plugin-cert\") pod \"monitoring-plugin-686d98f9fb-sftlw\" (UID: \"ea73c187-a1cf-4bd0-b52a-0461302e62b5\") " pod="openshift-monitoring/monitoring-plugin-686d98f9fb-sftlw" Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.119796 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/monitoring-plugin-686d98f9fb-sftlw"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.135887 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/metrics-server-5cf8fb7db7-v2cdn"]
Feb 16 11:14:02 crc kubenswrapper[4949]: W0216 11:14:02.150516 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8801355f_9f5d_427a_be0a_27e925d25a10.slice/crio-65f883aa9887c9d29554e0d145ef14a1359ea0ffb8d36791a16784af856939f8 WatchSource:0}: Error finding container 65f883aa9887c9d29554e0d145ef14a1359ea0ffb8d36791a16784af856939f8: Status 404 returned error can't find the container with id 65f883aa9887c9d29554e0d145ef14a1359ea0ffb8d36791a16784af856939f8
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.340285 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/monitoring-plugin-686d98f9fb-sftlw"]
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.398471 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/prometheus-k8s-0"]
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.400650 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.403945 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"configmap-metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/d86fd7a1-d075-48f3-9c35-8cde74d5981b-configmap-metrics-client-ca\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.404017 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/d86fd7a1-d075-48f3-9c35-8cde74d5981b-web-config\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.404057 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/d86fd7a1-d075-48f3-9c35-8cde74d5981b-secret-kube-rbac-proxy\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.404081 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"configmap-serving-certs-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d86fd7a1-d075-48f3-9c35-8cde74d5981b-configmap-serving-certs-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.404104 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d86fd7a1-d075-48f3-9c35-8cde74d5981b-configmap-kubelet-serving-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.404129 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/d86fd7a1-d075-48f3-9c35-8cde74d5981b-tls-assets\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.404152 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/d86fd7a1-d075-48f3-9c35-8cde74d5981b-thanos-prometheus-http-client-file\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.404194 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-k8s-db\" (UniqueName: \"kubernetes.io/empty-dir/d86fd7a1-d075-48f3-9c35-8cde74d5981b-prometheus-k8s-db\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.404218 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-prometheus-k8s-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/d86fd7a1-d075-48f3-9c35-8cde74d5981b-secret-prometheus-k8s-kube-rbac-proxy-web\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.404264 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/d86fd7a1-d075-48f3-9c35-8cde74d5981b-secret-metrics-client-certs\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.404294 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-prometheus-k8s-thanos-sidecar-tls\" (UniqueName: \"kubernetes.io/secret/d86fd7a1-d075-48f3-9c35-8cde74d5981b-secret-prometheus-k8s-thanos-sidecar-tls\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.404323 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-prometheus-k8s-tls\" (UniqueName: \"kubernetes.io/secret/d86fd7a1-d075-48f3-9c35-8cde74d5981b-secret-prometheus-k8s-tls\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.404344 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l9n8b\" (UniqueName: \"kubernetes.io/projected/d86fd7a1-d075-48f3-9c35-8cde74d5981b-kube-api-access-l9n8b\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.404371 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d86fd7a1-d075-48f3-9c35-8cde74d5981b-prometheus-trusted-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.404396 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/d86fd7a1-d075-48f3-9c35-8cde74d5981b-config\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.404414 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/d86fd7a1-d075-48f3-9c35-8cde74d5981b-secret-grpc-tls\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.404436 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/d86fd7a1-d075-48f3-9c35-8cde74d5981b-config-out\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.404461 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-k8s-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/d86fd7a1-d075-48f3-9c35-8cde74d5981b-prometheus-k8s-rulefiles-0\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.404860 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-kube-rbac-proxy-web"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.405082 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-tls-assets-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.405262 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-web-config"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.405818 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-thanos-prometheus-http-client-file"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.413507 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-dockercfg-4bcjn"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.413785 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"serving-certs-ca-bundle"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.413946 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-tls"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.414102 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-grpc-tls-27jcavac32kmu"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.414315 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.421213 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-thanos-sidecar-tls"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.421539 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-rbac-proxy"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.429108 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"prometheus-k8s-rulefiles-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.431966 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"prometheus-trusted-ca-bundle"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.465947 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-k8s-0"]
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.506256 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/d86fd7a1-d075-48f3-9c35-8cde74d5981b-secret-kube-rbac-proxy\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.506307 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"configmap-serving-certs-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d86fd7a1-d075-48f3-9c35-8cde74d5981b-configmap-serving-certs-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.506327 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d86fd7a1-d075-48f3-9c35-8cde74d5981b-configmap-kubelet-serving-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.506349 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/d86fd7a1-d075-48f3-9c35-8cde74d5981b-tls-assets\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.506366 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/d86fd7a1-d075-48f3-9c35-8cde74d5981b-thanos-prometheus-http-client-file\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.506390 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-k8s-db\" (UniqueName: \"kubernetes.io/empty-dir/d86fd7a1-d075-48f3-9c35-8cde74d5981b-prometheus-k8s-db\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.506407 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-prometheus-k8s-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/d86fd7a1-d075-48f3-9c35-8cde74d5981b-secret-prometheus-k8s-kube-rbac-proxy-web\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.506451 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/d86fd7a1-d075-48f3-9c35-8cde74d5981b-secret-metrics-client-certs\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.506475 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-prometheus-k8s-thanos-sidecar-tls\" (UniqueName: \"kubernetes.io/secret/d86fd7a1-d075-48f3-9c35-8cde74d5981b-secret-prometheus-k8s-thanos-sidecar-tls\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.506491 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-prometheus-k8s-tls\" (UniqueName: \"kubernetes.io/secret/d86fd7a1-d075-48f3-9c35-8cde74d5981b-secret-prometheus-k8s-tls\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.506506 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l9n8b\" (UniqueName: \"kubernetes.io/projected/d86fd7a1-d075-48f3-9c35-8cde74d5981b-kube-api-access-l9n8b\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.506522 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d86fd7a1-d075-48f3-9c35-8cde74d5981b-prometheus-trusted-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.506543 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/d86fd7a1-d075-48f3-9c35-8cde74d5981b-config\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.506558 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/d86fd7a1-d075-48f3-9c35-8cde74d5981b-secret-grpc-tls\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.506576 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/d86fd7a1-d075-48f3-9c35-8cde74d5981b-config-out\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.506591 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-k8s-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/d86fd7a1-d075-48f3-9c35-8cde74d5981b-prometheus-k8s-rulefiles-0\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.506620 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"configmap-metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/d86fd7a1-d075-48f3-9c35-8cde74d5981b-configmap-metrics-client-ca\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.506644 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/d86fd7a1-d075-48f3-9c35-8cde74d5981b-web-config\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.507561 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d86fd7a1-d075-48f3-9c35-8cde74d5981b-configmap-kubelet-serving-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.507817 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"configmap-serving-certs-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d86fd7a1-d075-48f3-9c35-8cde74d5981b-configmap-serving-certs-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.508286 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-k8s-db\" (UniqueName: \"kubernetes.io/empty-dir/d86fd7a1-d075-48f3-9c35-8cde74d5981b-prometheus-k8s-db\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.509331 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"configmap-metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/d86fd7a1-d075-48f3-9c35-8cde74d5981b-configmap-metrics-client-ca\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.511970 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d86fd7a1-d075-48f3-9c35-8cde74d5981b-prometheus-trusted-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.513575 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/d86fd7a1-d075-48f3-9c35-8cde74d5981b-config-out\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.514281 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/d86fd7a1-d075-48f3-9c35-8cde74d5981b-tls-assets\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.514364 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/d86fd7a1-d075-48f3-9c35-8cde74d5981b-web-config\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.514870 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-prometheus-k8s-thanos-sidecar-tls\" (UniqueName: \"kubernetes.io/secret/d86fd7a1-d075-48f3-9c35-8cde74d5981b-secret-prometheus-k8s-thanos-sidecar-tls\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.519248 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/d86fd7a1-d075-48f3-9c35-8cde74d5981b-secret-metrics-client-certs\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.520105 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/d86fd7a1-d075-48f3-9c35-8cde74d5981b-thanos-prometheus-http-client-file\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.522901 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-k8s-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/d86fd7a1-d075-48f3-9c35-8cde74d5981b-prometheus-k8s-rulefiles-0\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.524040 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/d86fd7a1-d075-48f3-9c35-8cde74d5981b-secret-grpc-tls\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.525516 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-prometheus-k8s-tls\" (UniqueName: \"kubernetes.io/secret/d86fd7a1-d075-48f3-9c35-8cde74d5981b-secret-prometheus-k8s-tls\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.525603 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/d86fd7a1-d075-48f3-9c35-8cde74d5981b-config\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.528940 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-prometheus-k8s-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/d86fd7a1-d075-48f3-9c35-8cde74d5981b-secret-prometheus-k8s-kube-rbac-proxy-web\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.529064 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/d86fd7a1-d075-48f3-9c35-8cde74d5981b-secret-kube-rbac-proxy\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.539121 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l9n8b\" (UniqueName: \"kubernetes.io/projected/d86fd7a1-d075-48f3-9c35-8cde74d5981b-kube-api-access-l9n8b\") pod \"prometheus-k8s-0\" (UID: \"d86fd7a1-d075-48f3-9c35-8cde74d5981b\") " pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.730225 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/prometheus-k8s-0"
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.873029 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/metrics-server-5cf8fb7db7-v2cdn" event={"ID":"8801355f-9f5d-427a-be0a-27e925d25a10","Type":"ContainerStarted","Data":"65f883aa9887c9d29554e0d145ef14a1359ea0ffb8d36791a16784af856939f8"}
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.875316 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7d46cdf4c7-mbnnn" event={"ID":"5b145423-74cd-418e-9932-cff00aee5ac6","Type":"ContainerStarted","Data":"eaae8c13eecaa9d7b6dc5cf00c5648ede1159439016a18b1772071f4f291b734"}
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.875337 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7d46cdf4c7-mbnnn" event={"ID":"5b145423-74cd-418e-9932-cff00aee5ac6","Type":"ContainerStarted","Data":"a57f12e586c225274f136e950d2cfefbe3c1aa6ecb225c20e1b2206c3ffb1945"}
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.878322 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/monitoring-plugin-686d98f9fb-sftlw" event={"ID":"ea73c187-a1cf-4bd0-b52a-0461302e62b5","Type":"ContainerStarted","Data":"03b67a41297b91eb7c17e008cf6148436eab5926d09a2d48e12f926bbcb0c894"}
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.881810 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-6b868984d8-gd5qc" event={"ID":"e637f49b-86a6-4961-bdac-3d5d751aa75e","Type":"ContainerStarted","Data":"6e415d7a72df174bac8f985c716187ce759b5bbafb287f6672edf1cb96065fa2"}
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.881882 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-6b868984d8-gd5qc" event={"ID":"e637f49b-86a6-4961-bdac-3d5d751aa75e","Type":"ContainerStarted","Data":"3abc0499118956d1f755151412b39c8f2e637d0f515ee7872a0c4810bcc3c73e"}
Feb 16 11:14:02 crc kubenswrapper[4949]: I0216 11:14:02.896898 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-7d46cdf4c7-mbnnn" podStartSLOduration=2.896868259 podStartE2EDuration="2.896868259s" podCreationTimestamp="2026-02-16 11:14:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:14:02.892635086 +0000 UTC m=+432.521969251" watchObservedRunningTime="2026-02-16 11:14:02.896868259 +0000 UTC m=+432.526202424"
Feb 16 11:14:03 crc kubenswrapper[4949]: I0216 11:14:03.013496 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-k8s-0"]
Feb 16 11:14:03 crc kubenswrapper[4949]: I0216 11:14:03.892359 4949 generic.go:334] "Generic (PLEG): container finished" podID="d86fd7a1-d075-48f3-9c35-8cde74d5981b" containerID="8d044dfad5ca28b2e2657fb40bd8b735b820bb46e954709ce80b54dc5cfbd274" exitCode=0
Feb 16 11:14:03 crc kubenswrapper[4949]: I0216 11:14:03.892491 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"d86fd7a1-d075-48f3-9c35-8cde74d5981b","Type":"ContainerDied","Data":"8d044dfad5ca28b2e2657fb40bd8b735b820bb46e954709ce80b54dc5cfbd274"}
Feb 16 11:14:03 crc kubenswrapper[4949]: I0216 11:14:03.892988 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"d86fd7a1-d075-48f3-9c35-8cde74d5981b","Type":"ContainerStarted","Data":"83a0e01a6252e14a53dcacd53a4ab8e183bade1f6feb465c8e38ab8b878ef72f"}
Feb 16 11:14:04 crc kubenswrapper[4949]: I0216 11:14:04.550154 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Feb 16 11:14:04 crc kubenswrapper[4949]: I0216 11:14:04.550255 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Feb 16 11:14:04 crc kubenswrapper[4949]: I0216 11:14:04.550313 4949 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-26lss"
Feb 16 11:14:04 crc kubenswrapper[4949]: I0216 11:14:04.551238 4949 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"85a0f2845d673d577410365d3d922f1e1f73440200500da1d4a358fe007bfb62"} pod="openshift-machine-config-operator/machine-config-daemon-26lss" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Feb 16 11:14:04 crc kubenswrapper[4949]: I0216 11:14:04.551305 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" containerID="cri-o://85a0f2845d673d577410365d3d922f1e1f73440200500da1d4a358fe007bfb62" gracePeriod=600
Feb 16 11:14:04 crc kubenswrapper[4949]: I0216 11:14:04.901273 4949 generic.go:334] "Generic (PLEG): container finished" podID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerID="85a0f2845d673d577410365d3d922f1e1f73440200500da1d4a358fe007bfb62" exitCode=0
Feb 16 11:14:04 crc kubenswrapper[4949]: I0216 11:14:04.901325 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" event={"ID":"39ca5ab7-457c-4404-a3eb-f6acce74843b","Type":"ContainerDied","Data":"85a0f2845d673d577410365d3d922f1e1f73440200500da1d4a358fe007bfb62"}
Feb 16 11:14:04 crc kubenswrapper[4949]: I0216 11:14:04.901377 4949 scope.go:117] "RemoveContainer" containerID="4978ec698681f01249736b54aa5a1fba37fc94f3667b735c99bdfb97d3bd3380"
Feb 16 11:14:05 crc kubenswrapper[4949]: I0216 11:14:05.923939 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-6b868984d8-gd5qc" event={"ID":"e637f49b-86a6-4961-bdac-3d5d751aa75e","Type":"ContainerStarted","Data":"4e15272eed0e7928e1743452242c48a6ee67e7a75dc7713fe0dc932e4c984cd5"}
Feb 16 11:14:05 crc kubenswrapper[4949]: I0216 11:14:05.924440 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-6b868984d8-gd5qc" event={"ID":"e637f49b-86a6-4961-bdac-3d5d751aa75e","Type":"ContainerStarted","Data":"cb2c4eb1bc14409ba5289c0f0bea49a8811305bf5ba3a2920e0b5c9e2fb2c46c"}
Feb 16 11:14:05 crc kubenswrapper[4949]: I0216 11:14:05.924453 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-6b868984d8-gd5qc" event={"ID":"e637f49b-86a6-4961-bdac-3d5d751aa75e","Type":"ContainerStarted","Data":"596d032e35a57a1aa772365b7a4bbddfdb017175bd5a218f7f33468ff7ddda33"}
Feb 16 11:14:05 crc kubenswrapper[4949]: I0216 11:14:05.925451 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/thanos-querier-6b868984d8-gd5qc"
Feb 16 11:14:05 crc kubenswrapper[4949]: I0216 11:14:05.930192 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"efcb1701-3963-48c2-894a-c754339e78b0","Type":"ContainerStarted","Data":"73e9f2e7d26c923d43d5ccc32c2a434e7395a5ef8a9c86587629f465736a9310"}
Feb 16 11:14:05 crc kubenswrapper[4949]: I0216 11:14:05.930218 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"efcb1701-3963-48c2-894a-c754339e78b0","Type":"ContainerStarted","Data":"5d6413cde21916a40d1d944ffdce9e59dab5aea8de31f37db896d5492766d883"}
Feb 16 11:14:05 crc kubenswrapper[4949]: I0216 11:14:05.930227 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"efcb1701-3963-48c2-894a-c754339e78b0","Type":"ContainerStarted","Data":"2ea5b8c102d9528925cf9f12ac1a78d06df9032b5caffb69fcf3aa14d9ff5c39"}
Feb 16 11:14:05 crc kubenswrapper[4949]: I0216 11:14:05.933397 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" event={"ID":"39ca5ab7-457c-4404-a3eb-f6acce74843b","Type":"ContainerStarted","Data":"749481410beaac0c07fd7ccbbe2c5d04579b8c026030bdd77c0733972cd5371b"}
Feb 16 11:14:05 crc kubenswrapper[4949]: I0216 11:14:05.935776 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/metrics-server-5cf8fb7db7-v2cdn" event={"ID":"8801355f-9f5d-427a-be0a-27e925d25a10","Type":"ContainerStarted","Data":"7e5781c5d03a6ee40e9b8d9c3b24368afbb47a6a2dc2cdf4eda16bf6d201f92b"}
Feb 16 11:14:05 crc kubenswrapper[4949]: I0216 11:14:05.943665 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/monitoring-plugin-686d98f9fb-sftlw" event={"ID":"ea73c187-a1cf-4bd0-b52a-0461302e62b5","Type":"ContainerStarted","Data":"98a3b4b18fb358213fab4f909ba68fa15405ff486c43dcdf8272faaca23c2f2f"}
Feb 16 11:14:05 crc kubenswrapper[4949]: I0216 11:14:05.960791 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/thanos-querier-6b868984d8-gd5qc" podStartSLOduration=1.5856111149999998 podStartE2EDuration="7.960755053s" podCreationTimestamp="2026-02-16 11:13:58 +0000 UTC" firstStartedPulling="2026-02-16 11:13:58.950249993 +0000 UTC m=+428.579584158" lastFinishedPulling="2026-02-16 11:14:05.325393931 +0000 UTC m=+434.954728096" observedRunningTime="2026-02-16 11:14:05.950818948 +0000 UTC m=+435.580153123" watchObservedRunningTime="2026-02-16 11:14:05.960755053 +0000 UTC m=+435.590089218"
Feb 16 11:14:05 crc kubenswrapper[4949]: I0216 11:14:05.987898 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/metrics-server-5cf8fb7db7-v2cdn" podStartSLOduration=1.822002059 podStartE2EDuration="4.987869238s" podCreationTimestamp="2026-02-16 11:14:01 +0000 UTC" firstStartedPulling="2026-02-16 11:14:02.159788459 +0000 UTC m=+431.789122624" lastFinishedPulling="2026-02-16 11:14:05.325655628 +0000 UTC m=+434.954989803" observedRunningTime="2026-02-16 11:14:05.9864525 +0000 UTC m=+435.615786675" watchObservedRunningTime="2026-02-16 11:14:05.987869238 +0000 UTC m=+435.617203403"
Feb 16 11:14:06 crc kubenswrapper[4949]: I0216 11:14:06.010356 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/monitoring-plugin-686d98f9fb-sftlw" podStartSLOduration=2.037957482 podStartE2EDuration="5.010337779s" podCreationTimestamp="2026-02-16 11:14:01 +0000 UTC" firstStartedPulling="2026-02-16 11:14:02.3536425 +0000 UTC m=+431.982976665" lastFinishedPulling="2026-02-16 11:14:05.326022797 +0000 UTC m=+434.955356962" observedRunningTime="2026-02-16 11:14:06.004617066 +0000 UTC m=+435.633951231" watchObservedRunningTime="2026-02-16 11:14:06.010337779 +0000 UTC m=+435.639671944"
Feb 16 11:14:06 crc kubenswrapper[4949]: I0216 11:14:06.959246 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"efcb1701-3963-48c2-894a-c754339e78b0","Type":"ContainerStarted","Data":"3128a4b9c0afc2cd9396280b0c051d6b166c6d8ad8305eefa44a3cf81ab7cb47"}
Feb 16 11:14:06 crc kubenswrapper[4949]: I0216 11:14:06.959778 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"efcb1701-3963-48c2-894a-c754339e78b0","Type":"ContainerStarted","Data":"471d7b976a84c0a0c8810ba7ba15fb574fee4ab809cab2e061cfa8ded419dcf1"}
Feb 16 11:14:06 crc kubenswrapper[4949]: I0216 11:14:06.959789 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"efcb1701-3963-48c2-894a-c754339e78b0","Type":"ContainerStarted","Data":"60f9e43ea84dbac4b34c2c24a0368da70fc8677d6953d37fba5bf18ffdc4b9a5"}
Feb 16 11:14:06 crc kubenswrapper[4949]: I0216 11:14:06.962617 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/monitoring-plugin-686d98f9fb-sftlw"
Feb 16 11:14:06 crc kubenswrapper[4949]: I0216 11:14:06.972891 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/monitoring-plugin-686d98f9fb-sftlw"
Feb 16 11:14:06 crc kubenswrapper[4949]: I0216 11:14:06.976652 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/thanos-querier-6b868984d8-gd5qc"
Feb 16 11:14:06 crc kubenswrapper[4949]: I0216 11:14:06.992199 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/alertmanager-main-0" podStartSLOduration=3.422934325 podStartE2EDuration="9.992164191s" podCreationTimestamp="2026-02-16 11:13:57 +0000 UTC" firstStartedPulling="2026-02-16 11:13:58.756260097 +0000 UTC m=+428.385594262" lastFinishedPulling="2026-02-16 11:14:05.325489973 +0000 UTC m=+434.954824128" observedRunningTime="2026-02-16 11:14:06.990386713 +0000 UTC m=+436.619720888" watchObservedRunningTime="2026-02-16 11:14:06.992164191 +0000 UTC m=+436.621498366"
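The "Observed pod startup duration" lines carry two figures: podStartE2EDuration is the observed running time minus the pod creation timestamp, and podStartSLOduration additionally excludes the image-pull window (lastFinishedPulling minus firstStartedPulling). Recomputing the thanos-querier-6b868984d8-gd5qc numbers from the timestamps logged above reproduces both values exactly:

    // Recomputing the thanos-querier startup figures from the log.
    package main

    import (
        "fmt"
        "time"
    )

    func mustParse(s string) time.Time {
        t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", s)
        if err != nil {
            panic(err)
        }
        return t
    }

    func main() {
        created := mustParse("2026-02-16 11:13:58 +0000 UTC")
        firstPull := mustParse("2026-02-16 11:13:58.950249993 +0000 UTC")
        lastPull := mustParse("2026-02-16 11:14:05.325393931 +0000 UTC")
        running := mustParse("2026-02-16 11:14:05.960755053 +0000 UTC") // watchObservedRunningTime

        e2e := running.Sub(created)          // 7.960755053s = podStartE2EDuration
        slo := e2e - lastPull.Sub(firstPull) // 1.585611115s ≈ podStartSLOduration
        fmt.Println(e2e, slo)
    }

The odd-looking podStartSLOduration=1.5856111149999998 in the log is just floating-point noise on the same subtraction.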
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"d86fd7a1-d075-48f3-9c35-8cde74d5981b","Type":"ContainerStarted","Data":"0a7aaab5153b1ab965e0f8fb958c4bd69d445577486297e552df0900879b5b77"} Feb 16 11:14:10 crc kubenswrapper[4949]: I0216 11:14:09.994928 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"d86fd7a1-d075-48f3-9c35-8cde74d5981b","Type":"ContainerStarted","Data":"752ee0fc1473e604e5f1d8e43381d040c3a1ebb759af71a524b0ba403b7503a7"} Feb 16 11:14:10 crc kubenswrapper[4949]: I0216 11:14:09.994946 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"d86fd7a1-d075-48f3-9c35-8cde74d5981b","Type":"ContainerStarted","Data":"7d140a36da68b1a8af3bf456684ead18675d6cbf8bd243009626b703b87937ae"} Feb 16 11:14:10 crc kubenswrapper[4949]: I0216 11:14:09.994957 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"d86fd7a1-d075-48f3-9c35-8cde74d5981b","Type":"ContainerStarted","Data":"21b32351153bbaf317a60654a8cea499ee8f42fa187f3c37ada347a485f8aac0"} Feb 16 11:14:10 crc kubenswrapper[4949]: I0216 11:14:09.994967 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"d86fd7a1-d075-48f3-9c35-8cde74d5981b","Type":"ContainerStarted","Data":"82b9b4bbab17f0cc81679f5b78cc82e4a1d0c44f8db6b192577964c693ab2e70"} Feb 16 11:14:11 crc kubenswrapper[4949]: I0216 11:14:11.007341 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"d86fd7a1-d075-48f3-9c35-8cde74d5981b","Type":"ContainerStarted","Data":"336bae6215f319586cd39b0b161019d1abe5ca22db934fc387d1ac3051a0ef46"} Feb 16 11:14:11 crc kubenswrapper[4949]: I0216 11:14:11.037750 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/prometheus-k8s-0" podStartSLOduration=3.771877074 podStartE2EDuration="9.037720834s" podCreationTimestamp="2026-02-16 11:14:02 +0000 UTC" firstStartedPulling="2026-02-16 11:14:03.895689416 +0000 UTC m=+433.525023581" lastFinishedPulling="2026-02-16 11:14:09.161533176 +0000 UTC m=+438.790867341" observedRunningTime="2026-02-16 11:14:11.035283289 +0000 UTC m=+440.664617474" watchObservedRunningTime="2026-02-16 11:14:11.037720834 +0000 UTC m=+440.667054999" Feb 16 11:14:11 crc kubenswrapper[4949]: I0216 11:14:11.158529 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-7d46cdf4c7-mbnnn" Feb 16 11:14:11 crc kubenswrapper[4949]: I0216 11:14:11.158618 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-7d46cdf4c7-mbnnn" Feb 16 11:14:11 crc kubenswrapper[4949]: I0216 11:14:11.167619 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-7d46cdf4c7-mbnnn" Feb 16 11:14:12 crc kubenswrapper[4949]: I0216 11:14:12.018809 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-7d46cdf4c7-mbnnn" Feb 16 11:14:12 crc kubenswrapper[4949]: I0216 11:14:12.084125 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-5xfq9"] Feb 16 11:14:12 crc kubenswrapper[4949]: I0216 11:14:12.730758 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/prometheus-k8s-0" Feb 16 11:14:21 crc 
kubenswrapper[4949]: I0216 11:14:21.696402 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-monitoring/metrics-server-5cf8fb7db7-v2cdn" Feb 16 11:14:21 crc kubenswrapper[4949]: I0216 11:14:21.697458 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/metrics-server-5cf8fb7db7-v2cdn" Feb 16 11:14:37 crc kubenswrapper[4949]: I0216 11:14:37.135713 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-5xfq9" podUID="468461de-4a56-47b0-a5a9-cf6e51b6de47" containerName="console" containerID="cri-o://870de372b4c2d58cdef50c5437346cee8087d575aef7f1fbec8574fb871d0f4a" gracePeriod=15 Feb 16 11:14:37 crc kubenswrapper[4949]: I0216 11:14:37.537850 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-5xfq9_468461de-4a56-47b0-a5a9-cf6e51b6de47/console/0.log" Feb 16 11:14:37 crc kubenswrapper[4949]: I0216 11:14:37.537936 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-5xfq9" Feb 16 11:14:37 crc kubenswrapper[4949]: I0216 11:14:37.693600 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/468461de-4a56-47b0-a5a9-cf6e51b6de47-console-config\") pod \"468461de-4a56-47b0-a5a9-cf6e51b6de47\" (UID: \"468461de-4a56-47b0-a5a9-cf6e51b6de47\") " Feb 16 11:14:37 crc kubenswrapper[4949]: I0216 11:14:37.693689 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/468461de-4a56-47b0-a5a9-cf6e51b6de47-console-oauth-config\") pod \"468461de-4a56-47b0-a5a9-cf6e51b6de47\" (UID: \"468461de-4a56-47b0-a5a9-cf6e51b6de47\") " Feb 16 11:14:37 crc kubenswrapper[4949]: I0216 11:14:37.693839 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/468461de-4a56-47b0-a5a9-cf6e51b6de47-console-serving-cert\") pod \"468461de-4a56-47b0-a5a9-cf6e51b6de47\" (UID: \"468461de-4a56-47b0-a5a9-cf6e51b6de47\") " Feb 16 11:14:37 crc kubenswrapper[4949]: I0216 11:14:37.693909 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-85hth\" (UniqueName: \"kubernetes.io/projected/468461de-4a56-47b0-a5a9-cf6e51b6de47-kube-api-access-85hth\") pod \"468461de-4a56-47b0-a5a9-cf6e51b6de47\" (UID: \"468461de-4a56-47b0-a5a9-cf6e51b6de47\") " Feb 16 11:14:37 crc kubenswrapper[4949]: I0216 11:14:37.693978 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/468461de-4a56-47b0-a5a9-cf6e51b6de47-trusted-ca-bundle\") pod \"468461de-4a56-47b0-a5a9-cf6e51b6de47\" (UID: \"468461de-4a56-47b0-a5a9-cf6e51b6de47\") " Feb 16 11:14:37 crc kubenswrapper[4949]: I0216 11:14:37.694035 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/468461de-4a56-47b0-a5a9-cf6e51b6de47-service-ca\") pod \"468461de-4a56-47b0-a5a9-cf6e51b6de47\" (UID: \"468461de-4a56-47b0-a5a9-cf6e51b6de47\") " Feb 16 11:14:37 crc kubenswrapper[4949]: I0216 11:14:37.694081 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: 
\"kubernetes.io/configmap/468461de-4a56-47b0-a5a9-cf6e51b6de47-oauth-serving-cert\") pod \"468461de-4a56-47b0-a5a9-cf6e51b6de47\" (UID: \"468461de-4a56-47b0-a5a9-cf6e51b6de47\") " Feb 16 11:14:37 crc kubenswrapper[4949]: I0216 11:14:37.694623 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/468461de-4a56-47b0-a5a9-cf6e51b6de47-console-config" (OuterVolumeSpecName: "console-config") pod "468461de-4a56-47b0-a5a9-cf6e51b6de47" (UID: "468461de-4a56-47b0-a5a9-cf6e51b6de47"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:14:37 crc kubenswrapper[4949]: I0216 11:14:37.695136 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/468461de-4a56-47b0-a5a9-cf6e51b6de47-service-ca" (OuterVolumeSpecName: "service-ca") pod "468461de-4a56-47b0-a5a9-cf6e51b6de47" (UID: "468461de-4a56-47b0-a5a9-cf6e51b6de47"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:14:37 crc kubenswrapper[4949]: I0216 11:14:37.695153 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/468461de-4a56-47b0-a5a9-cf6e51b6de47-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "468461de-4a56-47b0-a5a9-cf6e51b6de47" (UID: "468461de-4a56-47b0-a5a9-cf6e51b6de47"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:14:37 crc kubenswrapper[4949]: I0216 11:14:37.695254 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/468461de-4a56-47b0-a5a9-cf6e51b6de47-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "468461de-4a56-47b0-a5a9-cf6e51b6de47" (UID: "468461de-4a56-47b0-a5a9-cf6e51b6de47"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:14:37 crc kubenswrapper[4949]: I0216 11:14:37.708505 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/468461de-4a56-47b0-a5a9-cf6e51b6de47-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "468461de-4a56-47b0-a5a9-cf6e51b6de47" (UID: "468461de-4a56-47b0-a5a9-cf6e51b6de47"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:14:37 crc kubenswrapper[4949]: I0216 11:14:37.708514 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/468461de-4a56-47b0-a5a9-cf6e51b6de47-kube-api-access-85hth" (OuterVolumeSpecName: "kube-api-access-85hth") pod "468461de-4a56-47b0-a5a9-cf6e51b6de47" (UID: "468461de-4a56-47b0-a5a9-cf6e51b6de47"). InnerVolumeSpecName "kube-api-access-85hth". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:14:37 crc kubenswrapper[4949]: I0216 11:14:37.709644 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/468461de-4a56-47b0-a5a9-cf6e51b6de47-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "468461de-4a56-47b0-a5a9-cf6e51b6de47" (UID: "468461de-4a56-47b0-a5a9-cf6e51b6de47"). InnerVolumeSpecName "console-oauth-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:14:37 crc kubenswrapper[4949]: I0216 11:14:37.796236 4949 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/468461de-4a56-47b0-a5a9-cf6e51b6de47-service-ca\") on node \"crc\" DevicePath \"\"" Feb 16 11:14:37 crc kubenswrapper[4949]: I0216 11:14:37.796296 4949 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/468461de-4a56-47b0-a5a9-cf6e51b6de47-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 16 11:14:37 crc kubenswrapper[4949]: I0216 11:14:37.796320 4949 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/468461de-4a56-47b0-a5a9-cf6e51b6de47-console-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:14:37 crc kubenswrapper[4949]: I0216 11:14:37.796339 4949 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/468461de-4a56-47b0-a5a9-cf6e51b6de47-console-oauth-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:14:37 crc kubenswrapper[4949]: I0216 11:14:37.796358 4949 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/468461de-4a56-47b0-a5a9-cf6e51b6de47-console-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 16 11:14:37 crc kubenswrapper[4949]: I0216 11:14:37.796377 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-85hth\" (UniqueName: \"kubernetes.io/projected/468461de-4a56-47b0-a5a9-cf6e51b6de47-kube-api-access-85hth\") on node \"crc\" DevicePath \"\"" Feb 16 11:14:37 crc kubenswrapper[4949]: I0216 11:14:37.796397 4949 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/468461de-4a56-47b0-a5a9-cf6e51b6de47-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:14:38 crc kubenswrapper[4949]: I0216 11:14:38.216063 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-5xfq9_468461de-4a56-47b0-a5a9-cf6e51b6de47/console/0.log" Feb 16 11:14:38 crc kubenswrapper[4949]: I0216 11:14:38.216602 4949 generic.go:334] "Generic (PLEG): container finished" podID="468461de-4a56-47b0-a5a9-cf6e51b6de47" containerID="870de372b4c2d58cdef50c5437346cee8087d575aef7f1fbec8574fb871d0f4a" exitCode=2 Feb 16 11:14:38 crc kubenswrapper[4949]: I0216 11:14:38.216660 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-5xfq9" event={"ID":"468461de-4a56-47b0-a5a9-cf6e51b6de47","Type":"ContainerDied","Data":"870de372b4c2d58cdef50c5437346cee8087d575aef7f1fbec8574fb871d0f4a"} Feb 16 11:14:38 crc kubenswrapper[4949]: I0216 11:14:38.216685 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-5xfq9" Feb 16 11:14:38 crc kubenswrapper[4949]: I0216 11:14:38.216721 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-5xfq9" event={"ID":"468461de-4a56-47b0-a5a9-cf6e51b6de47","Type":"ContainerDied","Data":"ff680392fb14b709b616e517bf0a18415e432b73d01a7d2c3fbf256cd1917130"} Feb 16 11:14:38 crc kubenswrapper[4949]: I0216 11:14:38.216750 4949 scope.go:117] "RemoveContainer" containerID="870de372b4c2d58cdef50c5437346cee8087d575aef7f1fbec8574fb871d0f4a" Feb 16 11:14:38 crc kubenswrapper[4949]: I0216 11:14:38.251314 4949 scope.go:117] "RemoveContainer" containerID="870de372b4c2d58cdef50c5437346cee8087d575aef7f1fbec8574fb871d0f4a" Feb 16 11:14:38 crc kubenswrapper[4949]: E0216 11:14:38.253000 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"870de372b4c2d58cdef50c5437346cee8087d575aef7f1fbec8574fb871d0f4a\": container with ID starting with 870de372b4c2d58cdef50c5437346cee8087d575aef7f1fbec8574fb871d0f4a not found: ID does not exist" containerID="870de372b4c2d58cdef50c5437346cee8087d575aef7f1fbec8574fb871d0f4a" Feb 16 11:14:38 crc kubenswrapper[4949]: I0216 11:14:38.253046 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"870de372b4c2d58cdef50c5437346cee8087d575aef7f1fbec8574fb871d0f4a"} err="failed to get container status \"870de372b4c2d58cdef50c5437346cee8087d575aef7f1fbec8574fb871d0f4a\": rpc error: code = NotFound desc = could not find container \"870de372b4c2d58cdef50c5437346cee8087d575aef7f1fbec8574fb871d0f4a\": container with ID starting with 870de372b4c2d58cdef50c5437346cee8087d575aef7f1fbec8574fb871d0f4a not found: ID does not exist" Feb 16 11:14:38 crc kubenswrapper[4949]: I0216 11:14:38.258476 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-5xfq9"] Feb 16 11:14:38 crc kubenswrapper[4949]: I0216 11:14:38.268642 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-5xfq9"] Feb 16 11:14:39 crc kubenswrapper[4949]: I0216 11:14:39.247622 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="468461de-4a56-47b0-a5a9-cf6e51b6de47" path="/var/lib/kubelet/pods/468461de-4a56-47b0-a5a9-cf6e51b6de47/volumes" Feb 16 11:14:41 crc kubenswrapper[4949]: I0216 11:14:41.702256 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-monitoring/metrics-server-5cf8fb7db7-v2cdn" Feb 16 11:14:41 crc kubenswrapper[4949]: I0216 11:14:41.707695 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/metrics-server-5cf8fb7db7-v2cdn" Feb 16 11:15:00 crc kubenswrapper[4949]: I0216 11:15:00.174670 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29520675-fcng6"] Feb 16 11:15:00 crc kubenswrapper[4949]: E0216 11:15:00.175943 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="468461de-4a56-47b0-a5a9-cf6e51b6de47" containerName="console" Feb 16 11:15:00 crc kubenswrapper[4949]: I0216 11:15:00.175961 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="468461de-4a56-47b0-a5a9-cf6e51b6de47" containerName="console" Feb 16 11:15:00 crc kubenswrapper[4949]: I0216 11:15:00.176125 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="468461de-4a56-47b0-a5a9-cf6e51b6de47" containerName="console" Feb 16 
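Every kubenswrapper message above follows the klog header format: a severity letter (I/W/E/F), the MMDD date, the wall-clock time with microseconds, the PID, the source file:line, and then the message. A small, illustrative parser for pulling those fields out of lines like these (the regex is a sketch, not an exhaustive grammar):

    // Splitting a kubelet log line into its klog header fields.
    package main

    import (
        "fmt"
        "regexp"
    )

    var header = regexp.MustCompile(`^([IWEF])(\d{4}) (\d{2}:\d{2}:\d{2}\.\d+)\s+(\d+) ([^ ]+:\d+)\] (.*)$`)

    func main() {
        line := `I0216 11:14:38.268642 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-5xfq9"]`
        m := header.FindStringSubmatch(line)
        if m == nil {
            panic("line does not match klog header")
        }
        fmt.Printf("severity=%s date=%s time=%s pid=%s source=%s\nmsg=%s\n",
            m[1], m[2], m[3], m[4], m[5], m[6])
    }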
11:15:00 crc kubenswrapper[4949]: I0216 11:15:00.177909 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29520675-fcng6" Feb 16 11:15:00 crc kubenswrapper[4949]: I0216 11:15:00.181155 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Feb 16 11:15:00 crc kubenswrapper[4949]: I0216 11:15:00.181506 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Feb 16 11:15:00 crc kubenswrapper[4949]: I0216 11:15:00.185123 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29520675-fcng6"] Feb 16 11:15:00 crc kubenswrapper[4949]: I0216 11:15:00.185439 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-22nq2\" (UniqueName: \"kubernetes.io/projected/a9d0f7d5-6713-4be3-8daf-551e1647ad78-kube-api-access-22nq2\") pod \"collect-profiles-29520675-fcng6\" (UID: \"a9d0f7d5-6713-4be3-8daf-551e1647ad78\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520675-fcng6" Feb 16 11:15:00 crc kubenswrapper[4949]: I0216 11:15:00.185513 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a9d0f7d5-6713-4be3-8daf-551e1647ad78-secret-volume\") pod \"collect-profiles-29520675-fcng6\" (UID: \"a9d0f7d5-6713-4be3-8daf-551e1647ad78\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520675-fcng6" Feb 16 11:15:00 crc kubenswrapper[4949]: I0216 11:15:00.185582 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a9d0f7d5-6713-4be3-8daf-551e1647ad78-config-volume\") pod \"collect-profiles-29520675-fcng6\" (UID: \"a9d0f7d5-6713-4be3-8daf-551e1647ad78\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520675-fcng6" Feb 16 11:15:00 crc kubenswrapper[4949]: I0216 11:15:00.286766 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a9d0f7d5-6713-4be3-8daf-551e1647ad78-secret-volume\") pod \"collect-profiles-29520675-fcng6\" (UID: \"a9d0f7d5-6713-4be3-8daf-551e1647ad78\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520675-fcng6" Feb 16 11:15:00 crc kubenswrapper[4949]: I0216 11:15:00.287574 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a9d0f7d5-6713-4be3-8daf-551e1647ad78-config-volume\") pod \"collect-profiles-29520675-fcng6\" (UID: \"a9d0f7d5-6713-4be3-8daf-551e1647ad78\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520675-fcng6" Feb 16 11:15:00 crc kubenswrapper[4949]: I0216 11:15:00.287650 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-22nq2\" (UniqueName: \"kubernetes.io/projected/a9d0f7d5-6713-4be3-8daf-551e1647ad78-kube-api-access-22nq2\") pod \"collect-profiles-29520675-fcng6\" (UID: \"a9d0f7d5-6713-4be3-8daf-551e1647ad78\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520675-fcng6" Feb 16 11:15:00 crc kubenswrapper[4949]: I0216 11:15:00.288805 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"config-volume\" (UniqueName: \"kubernetes.io/configmap/a9d0f7d5-6713-4be3-8daf-551e1647ad78-config-volume\") pod \"collect-profiles-29520675-fcng6\" (UID: \"a9d0f7d5-6713-4be3-8daf-551e1647ad78\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520675-fcng6" Feb 16 11:15:00 crc kubenswrapper[4949]: I0216 11:15:00.293473 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a9d0f7d5-6713-4be3-8daf-551e1647ad78-secret-volume\") pod \"collect-profiles-29520675-fcng6\" (UID: \"a9d0f7d5-6713-4be3-8daf-551e1647ad78\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520675-fcng6" Feb 16 11:15:00 crc kubenswrapper[4949]: I0216 11:15:00.316971 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-22nq2\" (UniqueName: \"kubernetes.io/projected/a9d0f7d5-6713-4be3-8daf-551e1647ad78-kube-api-access-22nq2\") pod \"collect-profiles-29520675-fcng6\" (UID: \"a9d0f7d5-6713-4be3-8daf-551e1647ad78\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520675-fcng6" Feb 16 11:15:00 crc kubenswrapper[4949]: I0216 11:15:00.502825 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29520675-fcng6" Feb 16 11:15:00 crc kubenswrapper[4949]: I0216 11:15:00.940562 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29520675-fcng6"] Feb 16 11:15:01 crc kubenswrapper[4949]: I0216 11:15:01.379398 4949 generic.go:334] "Generic (PLEG): container finished" podID="a9d0f7d5-6713-4be3-8daf-551e1647ad78" containerID="3ec6469594d830601a5cc016e458f152574f921a55ce547dc9339c1b930a81ee" exitCode=0 Feb 16 11:15:01 crc kubenswrapper[4949]: I0216 11:15:01.379638 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29520675-fcng6" event={"ID":"a9d0f7d5-6713-4be3-8daf-551e1647ad78","Type":"ContainerDied","Data":"3ec6469594d830601a5cc016e458f152574f921a55ce547dc9339c1b930a81ee"} Feb 16 11:15:01 crc kubenswrapper[4949]: I0216 11:15:01.380453 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29520675-fcng6" event={"ID":"a9d0f7d5-6713-4be3-8daf-551e1647ad78","Type":"ContainerStarted","Data":"518fc2c483ef74afcd8c839e07357ea1a6aa2147a1715ebff7109778468abb29"} Feb 16 11:15:02 crc kubenswrapper[4949]: I0216 11:15:02.660666 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29520675-fcng6" Feb 16 11:15:02 crc kubenswrapper[4949]: I0216 11:15:02.731814 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-monitoring/prometheus-k8s-0" Feb 16 11:15:02 crc kubenswrapper[4949]: I0216 11:15:02.795731 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-monitoring/prometheus-k8s-0" Feb 16 11:15:02 crc kubenswrapper[4949]: I0216 11:15:02.835018 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a9d0f7d5-6713-4be3-8daf-551e1647ad78-config-volume\") pod \"a9d0f7d5-6713-4be3-8daf-551e1647ad78\" (UID: \"a9d0f7d5-6713-4be3-8daf-551e1647ad78\") " Feb 16 11:15:02 crc kubenswrapper[4949]: I0216 11:15:02.835155 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a9d0f7d5-6713-4be3-8daf-551e1647ad78-secret-volume\") pod \"a9d0f7d5-6713-4be3-8daf-551e1647ad78\" (UID: \"a9d0f7d5-6713-4be3-8daf-551e1647ad78\") " Feb 16 11:15:02 crc kubenswrapper[4949]: I0216 11:15:02.835252 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-22nq2\" (UniqueName: \"kubernetes.io/projected/a9d0f7d5-6713-4be3-8daf-551e1647ad78-kube-api-access-22nq2\") pod \"a9d0f7d5-6713-4be3-8daf-551e1647ad78\" (UID: \"a9d0f7d5-6713-4be3-8daf-551e1647ad78\") " Feb 16 11:15:02 crc kubenswrapper[4949]: I0216 11:15:02.836558 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a9d0f7d5-6713-4be3-8daf-551e1647ad78-config-volume" (OuterVolumeSpecName: "config-volume") pod "a9d0f7d5-6713-4be3-8daf-551e1647ad78" (UID: "a9d0f7d5-6713-4be3-8daf-551e1647ad78"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:15:02 crc kubenswrapper[4949]: I0216 11:15:02.838627 4949 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a9d0f7d5-6713-4be3-8daf-551e1647ad78-config-volume\") on node \"crc\" DevicePath \"\"" Feb 16 11:15:02 crc kubenswrapper[4949]: I0216 11:15:02.843317 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a9d0f7d5-6713-4be3-8daf-551e1647ad78-kube-api-access-22nq2" (OuterVolumeSpecName: "kube-api-access-22nq2") pod "a9d0f7d5-6713-4be3-8daf-551e1647ad78" (UID: "a9d0f7d5-6713-4be3-8daf-551e1647ad78"). InnerVolumeSpecName "kube-api-access-22nq2". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:15:02 crc kubenswrapper[4949]: I0216 11:15:02.844302 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a9d0f7d5-6713-4be3-8daf-551e1647ad78-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "a9d0f7d5-6713-4be3-8daf-551e1647ad78" (UID: "a9d0f7d5-6713-4be3-8daf-551e1647ad78"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:15:02 crc kubenswrapper[4949]: I0216 11:15:02.939492 4949 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a9d0f7d5-6713-4be3-8daf-551e1647ad78-secret-volume\") on node \"crc\" DevicePath \"\"" Feb 16 11:15:02 crc kubenswrapper[4949]: I0216 11:15:02.939557 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-22nq2\" (UniqueName: \"kubernetes.io/projected/a9d0f7d5-6713-4be3-8daf-551e1647ad78-kube-api-access-22nq2\") on node \"crc\" DevicePath \"\"" Feb 16 11:15:03 crc kubenswrapper[4949]: I0216 11:15:03.396063 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29520675-fcng6" event={"ID":"a9d0f7d5-6713-4be3-8daf-551e1647ad78","Type":"ContainerDied","Data":"518fc2c483ef74afcd8c839e07357ea1a6aa2147a1715ebff7109778468abb29"} Feb 16 11:15:03 crc kubenswrapper[4949]: I0216 11:15:03.396895 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="518fc2c483ef74afcd8c839e07357ea1a6aa2147a1715ebff7109778468abb29" Feb 16 11:15:03 crc kubenswrapper[4949]: I0216 11:15:03.396080 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29520675-fcng6" Feb 16 11:15:03 crc kubenswrapper[4949]: I0216 11:15:03.434560 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/prometheus-k8s-0" Feb 16 11:15:33 crc kubenswrapper[4949]: I0216 11:15:33.566600 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-786f9df47f-xh4rf"] Feb 16 11:15:33 crc kubenswrapper[4949]: E0216 11:15:33.567835 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9d0f7d5-6713-4be3-8daf-551e1647ad78" containerName="collect-profiles" Feb 16 11:15:33 crc kubenswrapper[4949]: I0216 11:15:33.567851 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9d0f7d5-6713-4be3-8daf-551e1647ad78" containerName="collect-profiles" Feb 16 11:15:33 crc kubenswrapper[4949]: I0216 11:15:33.567993 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="a9d0f7d5-6713-4be3-8daf-551e1647ad78" containerName="collect-profiles" Feb 16 11:15:33 crc kubenswrapper[4949]: I0216 11:15:33.568689 4949 util.go:30] "No sandbox for pod can be found. 
Feb 16 11:15:33 crc kubenswrapper[4949]: I0216 11:15:33.568689 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-786f9df47f-xh4rf" Feb 16 11:15:33 crc kubenswrapper[4949]: I0216 11:15:33.584135 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-786f9df47f-xh4rf"] Feb 16 11:15:33 crc kubenswrapper[4949]: I0216 11:15:33.688965 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/01deed3a-f137-429f-b5fd-672a361aa014-console-config\") pod \"console-786f9df47f-xh4rf\" (UID: \"01deed3a-f137-429f-b5fd-672a361aa014\") " pod="openshift-console/console-786f9df47f-xh4rf" Feb 16 11:15:33 crc kubenswrapper[4949]: I0216 11:15:33.689030 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/01deed3a-f137-429f-b5fd-672a361aa014-oauth-serving-cert\") pod \"console-786f9df47f-xh4rf\" (UID: \"01deed3a-f137-429f-b5fd-672a361aa014\") " pod="openshift-console/console-786f9df47f-xh4rf" Feb 16 11:15:33 crc kubenswrapper[4949]: I0216 11:15:33.689078 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2rbmc\" (UniqueName: \"kubernetes.io/projected/01deed3a-f137-429f-b5fd-672a361aa014-kube-api-access-2rbmc\") pod \"console-786f9df47f-xh4rf\" (UID: \"01deed3a-f137-429f-b5fd-672a361aa014\") " pod="openshift-console/console-786f9df47f-xh4rf" Feb 16 11:15:33 crc kubenswrapper[4949]: I0216 11:15:33.689107 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/01deed3a-f137-429f-b5fd-672a361aa014-service-ca\") pod \"console-786f9df47f-xh4rf\" (UID: \"01deed3a-f137-429f-b5fd-672a361aa014\") " pod="openshift-console/console-786f9df47f-xh4rf" Feb 16 11:15:33 crc kubenswrapper[4949]: I0216 11:15:33.689676 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/01deed3a-f137-429f-b5fd-672a361aa014-console-oauth-config\") pod \"console-786f9df47f-xh4rf\" (UID: \"01deed3a-f137-429f-b5fd-672a361aa014\") " pod="openshift-console/console-786f9df47f-xh4rf" Feb 16 11:15:33 crc kubenswrapper[4949]: I0216 11:15:33.689749 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/01deed3a-f137-429f-b5fd-672a361aa014-trusted-ca-bundle\") pod \"console-786f9df47f-xh4rf\" (UID: \"01deed3a-f137-429f-b5fd-672a361aa014\") " pod="openshift-console/console-786f9df47f-xh4rf" Feb 16 11:15:33 crc kubenswrapper[4949]: I0216 11:15:33.689806 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/01deed3a-f137-429f-b5fd-672a361aa014-console-serving-cert\") pod \"console-786f9df47f-xh4rf\" (UID: \"01deed3a-f137-429f-b5fd-672a361aa014\") " pod="openshift-console/console-786f9df47f-xh4rf" Feb 16 11:15:33 crc kubenswrapper[4949]: I0216 11:15:33.790958 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2rbmc\" (UniqueName: \"kubernetes.io/projected/01deed3a-f137-429f-b5fd-672a361aa014-kube-api-access-2rbmc\") pod \"console-786f9df47f-xh4rf\" (UID: \"01deed3a-f137-429f-b5fd-672a361aa014\") " pod="openshift-console/console-786f9df47f-xh4rf" Feb 16 11:15:33
crc kubenswrapper[4949]: I0216 11:15:33.791023 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/01deed3a-f137-429f-b5fd-672a361aa014-service-ca\") pod \"console-786f9df47f-xh4rf\" (UID: \"01deed3a-f137-429f-b5fd-672a361aa014\") " pod="openshift-console/console-786f9df47f-xh4rf" Feb 16 11:15:33 crc kubenswrapper[4949]: I0216 11:15:33.791047 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/01deed3a-f137-429f-b5fd-672a361aa014-console-oauth-config\") pod \"console-786f9df47f-xh4rf\" (UID: \"01deed3a-f137-429f-b5fd-672a361aa014\") " pod="openshift-console/console-786f9df47f-xh4rf" Feb 16 11:15:33 crc kubenswrapper[4949]: I0216 11:15:33.791068 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/01deed3a-f137-429f-b5fd-672a361aa014-trusted-ca-bundle\") pod \"console-786f9df47f-xh4rf\" (UID: \"01deed3a-f137-429f-b5fd-672a361aa014\") " pod="openshift-console/console-786f9df47f-xh4rf" Feb 16 11:15:33 crc kubenswrapper[4949]: I0216 11:15:33.791105 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/01deed3a-f137-429f-b5fd-672a361aa014-console-serving-cert\") pod \"console-786f9df47f-xh4rf\" (UID: \"01deed3a-f137-429f-b5fd-672a361aa014\") " pod="openshift-console/console-786f9df47f-xh4rf" Feb 16 11:15:33 crc kubenswrapper[4949]: I0216 11:15:33.791207 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/01deed3a-f137-429f-b5fd-672a361aa014-console-config\") pod \"console-786f9df47f-xh4rf\" (UID: \"01deed3a-f137-429f-b5fd-672a361aa014\") " pod="openshift-console/console-786f9df47f-xh4rf" Feb 16 11:15:33 crc kubenswrapper[4949]: I0216 11:15:33.791239 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/01deed3a-f137-429f-b5fd-672a361aa014-oauth-serving-cert\") pod \"console-786f9df47f-xh4rf\" (UID: \"01deed3a-f137-429f-b5fd-672a361aa014\") " pod="openshift-console/console-786f9df47f-xh4rf" Feb 16 11:15:33 crc kubenswrapper[4949]: I0216 11:15:33.792414 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/01deed3a-f137-429f-b5fd-672a361aa014-oauth-serving-cert\") pod \"console-786f9df47f-xh4rf\" (UID: \"01deed3a-f137-429f-b5fd-672a361aa014\") " pod="openshift-console/console-786f9df47f-xh4rf" Feb 16 11:15:33 crc kubenswrapper[4949]: I0216 11:15:33.792549 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/01deed3a-f137-429f-b5fd-672a361aa014-service-ca\") pod \"console-786f9df47f-xh4rf\" (UID: \"01deed3a-f137-429f-b5fd-672a361aa014\") " pod="openshift-console/console-786f9df47f-xh4rf" Feb 16 11:15:33 crc kubenswrapper[4949]: I0216 11:15:33.792590 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/01deed3a-f137-429f-b5fd-672a361aa014-trusted-ca-bundle\") pod \"console-786f9df47f-xh4rf\" (UID: \"01deed3a-f137-429f-b5fd-672a361aa014\") " pod="openshift-console/console-786f9df47f-xh4rf" Feb 16 11:15:33 crc kubenswrapper[4949]: I0216 11:15:33.793293 
4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/01deed3a-f137-429f-b5fd-672a361aa014-console-config\") pod \"console-786f9df47f-xh4rf\" (UID: \"01deed3a-f137-429f-b5fd-672a361aa014\") " pod="openshift-console/console-786f9df47f-xh4rf" Feb 16 11:15:33 crc kubenswrapper[4949]: I0216 11:15:33.799028 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/01deed3a-f137-429f-b5fd-672a361aa014-console-serving-cert\") pod \"console-786f9df47f-xh4rf\" (UID: \"01deed3a-f137-429f-b5fd-672a361aa014\") " pod="openshift-console/console-786f9df47f-xh4rf" Feb 16 11:15:33 crc kubenswrapper[4949]: I0216 11:15:33.804310 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/01deed3a-f137-429f-b5fd-672a361aa014-console-oauth-config\") pod \"console-786f9df47f-xh4rf\" (UID: \"01deed3a-f137-429f-b5fd-672a361aa014\") " pod="openshift-console/console-786f9df47f-xh4rf" Feb 16 11:15:33 crc kubenswrapper[4949]: I0216 11:15:33.808797 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2rbmc\" (UniqueName: \"kubernetes.io/projected/01deed3a-f137-429f-b5fd-672a361aa014-kube-api-access-2rbmc\") pod \"console-786f9df47f-xh4rf\" (UID: \"01deed3a-f137-429f-b5fd-672a361aa014\") " pod="openshift-console/console-786f9df47f-xh4rf" Feb 16 11:15:33 crc kubenswrapper[4949]: I0216 11:15:33.891008 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-786f9df47f-xh4rf" Feb 16 11:15:34 crc kubenswrapper[4949]: I0216 11:15:34.079732 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-786f9df47f-xh4rf"] Feb 16 11:15:34 crc kubenswrapper[4949]: I0216 11:15:34.634276 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-786f9df47f-xh4rf" event={"ID":"01deed3a-f137-429f-b5fd-672a361aa014","Type":"ContainerStarted","Data":"dc4b7a4cf3e307f9fc378b5898f19b7643650d92659511e1c725565492a93948"} Feb 16 11:15:34 crc kubenswrapper[4949]: I0216 11:15:34.634710 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-786f9df47f-xh4rf" event={"ID":"01deed3a-f137-429f-b5fd-672a361aa014","Type":"ContainerStarted","Data":"e9a140c4b504f7a5a942092929dfd78c2dad8c83d9045846e46f28a182feb8bd"} Feb 16 11:15:34 crc kubenswrapper[4949]: I0216 11:15:34.661347 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-786f9df47f-xh4rf" podStartSLOduration=1.661319435 podStartE2EDuration="1.661319435s" podCreationTimestamp="2026-02-16 11:15:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:15:34.654648117 +0000 UTC m=+524.283982312" watchObservedRunningTime="2026-02-16 11:15:34.661319435 +0000 UTC m=+524.290653600" Feb 16 11:15:43 crc kubenswrapper[4949]: I0216 11:15:43.891779 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-786f9df47f-xh4rf" Feb 16 11:15:43 crc kubenswrapper[4949]: I0216 11:15:43.894077 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-786f9df47f-xh4rf"
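The pod_startup_latency_tracker record above packs several Go time idioms into one line. firstStartedPulling and lastFinishedPulling are the zero time.Time (0001-01-01 00:00:00 +0000 UTC), the tracker's way of recording that no image pull was needed, and the m=+524.28... suffix on observedRunningTime is Go's monotonic clock reading, which here also tells you roughly how many seconds this kubelet process had been running. A short, self-contained Go illustration of both behaviors:

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	start := time.Now()
    	time.Sleep(50 * time.Millisecond)

    	// Printing a time.Time appends the monotonic reading, e.g.
    	// "... +0000 UTC m=+0.050123456", just like the kubelet entry above.
    	fmt.Println(time.Now())

    	// A zero time.Time prints as "0001-01-01 00:00:00 +0000 UTC",
    	// the same sentinel the tracker logs when no image pull occurred.
    	var zero time.Time
    	fmt.Println(zero)

    	// Durations are computed from the monotonic part, immune to clock jumps.
    	fmt.Println(time.Since(start))
    }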
status="started" pod="openshift-console/console-786f9df47f-xh4rf" Feb 16 11:15:44 crc kubenswrapper[4949]: I0216 11:15:44.751128 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-786f9df47f-xh4rf" Feb 16 11:15:44 crc kubenswrapper[4949]: I0216 11:15:44.869890 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-7d46cdf4c7-mbnnn"] Feb 16 11:16:09 crc kubenswrapper[4949]: I0216 11:16:09.916025 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-7d46cdf4c7-mbnnn" podUID="5b145423-74cd-418e-9932-cff00aee5ac6" containerName="console" containerID="cri-o://eaae8c13eecaa9d7b6dc5cf00c5648ede1159439016a18b1772071f4f291b734" gracePeriod=15 Feb 16 11:16:10 crc kubenswrapper[4949]: I0216 11:16:10.322217 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-7d46cdf4c7-mbnnn_5b145423-74cd-418e-9932-cff00aee5ac6/console/0.log" Feb 16 11:16:10 crc kubenswrapper[4949]: I0216 11:16:10.322553 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-7d46cdf4c7-mbnnn" Feb 16 11:16:10 crc kubenswrapper[4949]: I0216 11:16:10.448752 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9qdjw\" (UniqueName: \"kubernetes.io/projected/5b145423-74cd-418e-9932-cff00aee5ac6-kube-api-access-9qdjw\") pod \"5b145423-74cd-418e-9932-cff00aee5ac6\" (UID: \"5b145423-74cd-418e-9932-cff00aee5ac6\") " Feb 16 11:16:10 crc kubenswrapper[4949]: I0216 11:16:10.448853 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/5b145423-74cd-418e-9932-cff00aee5ac6-console-serving-cert\") pod \"5b145423-74cd-418e-9932-cff00aee5ac6\" (UID: \"5b145423-74cd-418e-9932-cff00aee5ac6\") " Feb 16 11:16:10 crc kubenswrapper[4949]: I0216 11:16:10.448989 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/5b145423-74cd-418e-9932-cff00aee5ac6-console-config\") pod \"5b145423-74cd-418e-9932-cff00aee5ac6\" (UID: \"5b145423-74cd-418e-9932-cff00aee5ac6\") " Feb 16 11:16:10 crc kubenswrapper[4949]: I0216 11:16:10.449029 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/5b145423-74cd-418e-9932-cff00aee5ac6-service-ca\") pod \"5b145423-74cd-418e-9932-cff00aee5ac6\" (UID: \"5b145423-74cd-418e-9932-cff00aee5ac6\") " Feb 16 11:16:10 crc kubenswrapper[4949]: I0216 11:16:10.449138 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/5b145423-74cd-418e-9932-cff00aee5ac6-oauth-serving-cert\") pod \"5b145423-74cd-418e-9932-cff00aee5ac6\" (UID: \"5b145423-74cd-418e-9932-cff00aee5ac6\") " Feb 16 11:16:10 crc kubenswrapper[4949]: I0216 11:16:10.449199 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5b145423-74cd-418e-9932-cff00aee5ac6-trusted-ca-bundle\") pod \"5b145423-74cd-418e-9932-cff00aee5ac6\" (UID: \"5b145423-74cd-418e-9932-cff00aee5ac6\") " Feb 16 11:16:10 crc kubenswrapper[4949]: I0216 11:16:10.449243 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: 
\"kubernetes.io/secret/5b145423-74cd-418e-9932-cff00aee5ac6-console-oauth-config\") pod \"5b145423-74cd-418e-9932-cff00aee5ac6\" (UID: \"5b145423-74cd-418e-9932-cff00aee5ac6\") " Feb 16 11:16:10 crc kubenswrapper[4949]: I0216 11:16:10.450422 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5b145423-74cd-418e-9932-cff00aee5ac6-console-config" (OuterVolumeSpecName: "console-config") pod "5b145423-74cd-418e-9932-cff00aee5ac6" (UID: "5b145423-74cd-418e-9932-cff00aee5ac6"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:16:10 crc kubenswrapper[4949]: I0216 11:16:10.450837 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5b145423-74cd-418e-9932-cff00aee5ac6-service-ca" (OuterVolumeSpecName: "service-ca") pod "5b145423-74cd-418e-9932-cff00aee5ac6" (UID: "5b145423-74cd-418e-9932-cff00aee5ac6"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:16:10 crc kubenswrapper[4949]: I0216 11:16:10.450862 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5b145423-74cd-418e-9932-cff00aee5ac6-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "5b145423-74cd-418e-9932-cff00aee5ac6" (UID: "5b145423-74cd-418e-9932-cff00aee5ac6"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:16:10 crc kubenswrapper[4949]: I0216 11:16:10.450942 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5b145423-74cd-418e-9932-cff00aee5ac6-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "5b145423-74cd-418e-9932-cff00aee5ac6" (UID: "5b145423-74cd-418e-9932-cff00aee5ac6"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:16:10 crc kubenswrapper[4949]: I0216 11:16:10.457075 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b145423-74cd-418e-9932-cff00aee5ac6-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "5b145423-74cd-418e-9932-cff00aee5ac6" (UID: "5b145423-74cd-418e-9932-cff00aee5ac6"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:16:10 crc kubenswrapper[4949]: I0216 11:16:10.460190 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b145423-74cd-418e-9932-cff00aee5ac6-kube-api-access-9qdjw" (OuterVolumeSpecName: "kube-api-access-9qdjw") pod "5b145423-74cd-418e-9932-cff00aee5ac6" (UID: "5b145423-74cd-418e-9932-cff00aee5ac6"). InnerVolumeSpecName "kube-api-access-9qdjw". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:16:10 crc kubenswrapper[4949]: I0216 11:16:10.460321 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b145423-74cd-418e-9932-cff00aee5ac6-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "5b145423-74cd-418e-9932-cff00aee5ac6" (UID: "5b145423-74cd-418e-9932-cff00aee5ac6"). InnerVolumeSpecName "console-oauth-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:16:10 crc kubenswrapper[4949]: I0216 11:16:10.550634 4949 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/5b145423-74cd-418e-9932-cff00aee5ac6-console-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 16 11:16:10 crc kubenswrapper[4949]: I0216 11:16:10.550687 4949 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/5b145423-74cd-418e-9932-cff00aee5ac6-console-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:16:10 crc kubenswrapper[4949]: I0216 11:16:10.550699 4949 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/5b145423-74cd-418e-9932-cff00aee5ac6-service-ca\") on node \"crc\" DevicePath \"\"" Feb 16 11:16:10 crc kubenswrapper[4949]: I0216 11:16:10.550710 4949 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/5b145423-74cd-418e-9932-cff00aee5ac6-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 16 11:16:10 crc kubenswrapper[4949]: I0216 11:16:10.550721 4949 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5b145423-74cd-418e-9932-cff00aee5ac6-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:16:10 crc kubenswrapper[4949]: I0216 11:16:10.550732 4949 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/5b145423-74cd-418e-9932-cff00aee5ac6-console-oauth-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:16:10 crc kubenswrapper[4949]: I0216 11:16:10.550745 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9qdjw\" (UniqueName: \"kubernetes.io/projected/5b145423-74cd-418e-9932-cff00aee5ac6-kube-api-access-9qdjw\") on node \"crc\" DevicePath \"\"" Feb 16 11:16:10 crc kubenswrapper[4949]: I0216 11:16:10.947283 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-7d46cdf4c7-mbnnn_5b145423-74cd-418e-9932-cff00aee5ac6/console/0.log" Feb 16 11:16:10 crc kubenswrapper[4949]: I0216 11:16:10.947362 4949 generic.go:334] "Generic (PLEG): container finished" podID="5b145423-74cd-418e-9932-cff00aee5ac6" containerID="eaae8c13eecaa9d7b6dc5cf00c5648ede1159439016a18b1772071f4f291b734" exitCode=2 Feb 16 11:16:10 crc kubenswrapper[4949]: I0216 11:16:10.947429 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7d46cdf4c7-mbnnn" event={"ID":"5b145423-74cd-418e-9932-cff00aee5ac6","Type":"ContainerDied","Data":"eaae8c13eecaa9d7b6dc5cf00c5648ede1159439016a18b1772071f4f291b734"} Feb 16 11:16:10 crc kubenswrapper[4949]: I0216 11:16:10.947486 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-7d46cdf4c7-mbnnn" Feb 16 11:16:10 crc kubenswrapper[4949]: I0216 11:16:10.947518 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7d46cdf4c7-mbnnn" event={"ID":"5b145423-74cd-418e-9932-cff00aee5ac6","Type":"ContainerDied","Data":"a57f12e586c225274f136e950d2cfefbe3c1aa6ecb225c20e1b2206c3ffb1945"} Feb 16 11:16:10 crc kubenswrapper[4949]: I0216 11:16:10.947557 4949 scope.go:117] "RemoveContainer" containerID="eaae8c13eecaa9d7b6dc5cf00c5648ede1159439016a18b1772071f4f291b734" Feb 16 11:16:10 crc kubenswrapper[4949]: I0216 11:16:10.973906 4949 scope.go:117] "RemoveContainer" containerID="eaae8c13eecaa9d7b6dc5cf00c5648ede1159439016a18b1772071f4f291b734" Feb 16 11:16:10 crc kubenswrapper[4949]: E0216 11:16:10.975469 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eaae8c13eecaa9d7b6dc5cf00c5648ede1159439016a18b1772071f4f291b734\": container with ID starting with eaae8c13eecaa9d7b6dc5cf00c5648ede1159439016a18b1772071f4f291b734 not found: ID does not exist" containerID="eaae8c13eecaa9d7b6dc5cf00c5648ede1159439016a18b1772071f4f291b734" Feb 16 11:16:10 crc kubenswrapper[4949]: I0216 11:16:10.975533 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eaae8c13eecaa9d7b6dc5cf00c5648ede1159439016a18b1772071f4f291b734"} err="failed to get container status \"eaae8c13eecaa9d7b6dc5cf00c5648ede1159439016a18b1772071f4f291b734\": rpc error: code = NotFound desc = could not find container \"eaae8c13eecaa9d7b6dc5cf00c5648ede1159439016a18b1772071f4f291b734\": container with ID starting with eaae8c13eecaa9d7b6dc5cf00c5648ede1159439016a18b1772071f4f291b734 not found: ID does not exist" Feb 16 11:16:10 crc kubenswrapper[4949]: I0216 11:16:10.998161 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-7d46cdf4c7-mbnnn"] Feb 16 11:16:11 crc kubenswrapper[4949]: I0216 11:16:11.004717 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-7d46cdf4c7-mbnnn"] Feb 16 11:16:11 crc kubenswrapper[4949]: I0216 11:16:11.245575 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b145423-74cd-418e-9932-cff00aee5ac6" path="/var/lib/kubelet/pods/5b145423-74cd-418e-9932-cff00aee5ac6/volumes" Feb 16 11:16:34 crc kubenswrapper[4949]: I0216 11:16:34.550653 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 16 11:16:34 crc kubenswrapper[4949]: I0216 11:16:34.551713 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 16 11:17:04 crc kubenswrapper[4949]: I0216 11:17:04.550731 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
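The ContainerStatus and DeleteContainer errors at 11:16:10.975 above are a benign race rather than a fault: the container had already been removed, so the CRI call comes back with gRPC code NotFound and the kubelet simply logs it and moves on. A caller that wants to swallow exactly that case and surface everything else typically tests the gRPC status code; a minimal sketch in Go (isNotFound is an illustrative helper, not kubelet code):

    package main

    import (
    	"errors"
    	"fmt"

    	"google.golang.org/grpc/codes"
    	"google.golang.org/grpc/status"
    )

    // isNotFound reports whether err is a gRPC error with code NotFound,
    // the code carried by the "could not find container" failures above.
    func isNotFound(err error) bool {
    	s, ok := status.FromError(err)
    	return ok && s.Code() == codes.NotFound
    }

    func main() {
    	already := status.Error(codes.NotFound, "could not find container")
    	fmt.Println(isNotFound(already))                  // true: safe to ignore
    	fmt.Println(isNotFound(errors.New("rpc failed"))) // false: surface it
    }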
Feb 16 11:17:04 crc kubenswrapper[4949]: I0216 11:17:04.551251 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 16 11:17:34 crc kubenswrapper[4949]: I0216 11:17:34.550624 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 16 11:17:34 crc kubenswrapper[4949]: I0216 11:17:34.551738 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 16 11:17:34 crc kubenswrapper[4949]: I0216 11:17:34.551850 4949 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-26lss" Feb 16 11:17:34 crc kubenswrapper[4949]: I0216 11:17:34.553077 4949 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"749481410beaac0c07fd7ccbbe2c5d04579b8c026030bdd77c0733972cd5371b"} pod="openshift-machine-config-operator/machine-config-daemon-26lss" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 16 11:17:34 crc kubenswrapper[4949]: I0216 11:17:34.553241 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" containerID="cri-o://749481410beaac0c07fd7ccbbe2c5d04579b8c026030bdd77c0733972cd5371b" gracePeriod=600 Feb 16 11:17:35 crc kubenswrapper[4949]: I0216 11:17:35.594582 4949 generic.go:334] "Generic (PLEG): container finished" podID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerID="749481410beaac0c07fd7ccbbe2c5d04579b8c026030bdd77c0733972cd5371b" exitCode=0 Feb 16 11:17:35 crc kubenswrapper[4949]: I0216 11:17:35.594695 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" event={"ID":"39ca5ab7-457c-4404-a3eb-f6acce74843b","Type":"ContainerDied","Data":"749481410beaac0c07fd7ccbbe2c5d04579b8c026030bdd77c0733972cd5371b"} Feb 16 11:17:35 crc kubenswrapper[4949]: I0216 11:17:35.595370 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" event={"ID":"39ca5ab7-457c-4404-a3eb-f6acce74843b","Type":"ContainerStarted","Data":"060f940f61e708f7f4d603618a347c5e6eb0b808f7ca4a5027e6133a9e486da3"} Feb 16 11:17:35 crc kubenswrapper[4949]: I0216 11:17:35.595406 4949 scope.go:117] "RemoveContainer" containerID="85a0f2845d673d577410365d3d922f1e1f73440200500da1d4a358fe007bfb62"
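The block above shows a failing HTTP liveness probe from end to end: GET http://127.0.0.1:8798/health is refused at 11:16:34, 11:17:04 and 11:17:34 (a 30-second period), the third consecutive failure trips the probe here, and the kubelet kills the container with its termination grace period (gracePeriod=600) and starts a replacement. Mechanically such a probe is just a timed HTTP GET that treats a status from 200 through 399 as success. A minimal Go sketch of that check, with the URL taken from the log and the one-second timeout an assumption:

    package main

    import (
    	"fmt"
    	"net/http"
    	"time"
    )

    // probeOnce performs one HTTP health check in the style of the
    // kubelet probe failing above: success on 2xx/3xx, error otherwise.
    func probeOnce(url string, timeout time.Duration) error {
    	client := &http.Client{Timeout: timeout}
    	resp, err := client.Get(url)
    	if err != nil {
    		return err // e.g. "connect: connection refused", as in the log
    	}
    	defer resp.Body.Close()
    	if resp.StatusCode >= 200 && resp.StatusCode < 400 {
    		return nil
    	}
    	return fmt.Errorf("unhealthy: HTTP %d", resp.StatusCode)
    }

    func main() {
    	if err := probeOnce("http://127.0.0.1:8798/health", time.Second); err != nil {
    		fmt.Println("Probe failed:", err)
    	} else {
    		fmt.Println("Probe succeeded")
    	}
    }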
removing container" podUID="5b145423-74cd-418e-9932-cff00aee5ac6" containerName="console" Feb 16 11:18:37 crc kubenswrapper[4949]: I0216 11:18:37.277809 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b145423-74cd-418e-9932-cff00aee5ac6" containerName="console" Feb 16 11:18:37 crc kubenswrapper[4949]: I0216 11:18:37.277987 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b145423-74cd-418e-9932-cff00aee5ac6" containerName="console" Feb 16 11:18:37 crc kubenswrapper[4949]: I0216 11:18:37.278987 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08zbjq8" Feb 16 11:18:37 crc kubenswrapper[4949]: I0216 11:18:37.281901 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Feb 16 11:18:37 crc kubenswrapper[4949]: I0216 11:18:37.285642 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08zbjq8"] Feb 16 11:18:37 crc kubenswrapper[4949]: I0216 11:18:37.414502 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8ql92\" (UniqueName: \"kubernetes.io/projected/a03698a2-f417-46be-a245-088d7a9a5ac5-kube-api-access-8ql92\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08zbjq8\" (UID: \"a03698a2-f417-46be-a245-088d7a9a5ac5\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08zbjq8" Feb 16 11:18:37 crc kubenswrapper[4949]: I0216 11:18:37.414564 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a03698a2-f417-46be-a245-088d7a9a5ac5-util\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08zbjq8\" (UID: \"a03698a2-f417-46be-a245-088d7a9a5ac5\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08zbjq8" Feb 16 11:18:37 crc kubenswrapper[4949]: I0216 11:18:37.414651 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a03698a2-f417-46be-a245-088d7a9a5ac5-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08zbjq8\" (UID: \"a03698a2-f417-46be-a245-088d7a9a5ac5\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08zbjq8" Feb 16 11:18:37 crc kubenswrapper[4949]: I0216 11:18:37.516164 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8ql92\" (UniqueName: \"kubernetes.io/projected/a03698a2-f417-46be-a245-088d7a9a5ac5-kube-api-access-8ql92\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08zbjq8\" (UID: \"a03698a2-f417-46be-a245-088d7a9a5ac5\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08zbjq8" Feb 16 11:18:37 crc kubenswrapper[4949]: I0216 11:18:37.516501 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a03698a2-f417-46be-a245-088d7a9a5ac5-util\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08zbjq8\" (UID: \"a03698a2-f417-46be-a245-088d7a9a5ac5\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08zbjq8" Feb 16 11:18:37 crc kubenswrapper[4949]: I0216 11:18:37.516559 4949 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a03698a2-f417-46be-a245-088d7a9a5ac5-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08zbjq8\" (UID: \"a03698a2-f417-46be-a245-088d7a9a5ac5\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08zbjq8" Feb 16 11:18:37 crc kubenswrapper[4949]: I0216 11:18:37.517067 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a03698a2-f417-46be-a245-088d7a9a5ac5-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08zbjq8\" (UID: \"a03698a2-f417-46be-a245-088d7a9a5ac5\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08zbjq8" Feb 16 11:18:37 crc kubenswrapper[4949]: I0216 11:18:37.517470 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a03698a2-f417-46be-a245-088d7a9a5ac5-util\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08zbjq8\" (UID: \"a03698a2-f417-46be-a245-088d7a9a5ac5\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08zbjq8" Feb 16 11:18:37 crc kubenswrapper[4949]: I0216 11:18:37.553191 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8ql92\" (UniqueName: \"kubernetes.io/projected/a03698a2-f417-46be-a245-088d7a9a5ac5-kube-api-access-8ql92\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08zbjq8\" (UID: \"a03698a2-f417-46be-a245-088d7a9a5ac5\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08zbjq8" Feb 16 11:18:37 crc kubenswrapper[4949]: I0216 11:18:37.601919 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08zbjq8" Feb 16 11:18:37 crc kubenswrapper[4949]: I0216 11:18:37.795216 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08zbjq8"] Feb 16 11:18:38 crc kubenswrapper[4949]: I0216 11:18:38.057845 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08zbjq8" event={"ID":"a03698a2-f417-46be-a245-088d7a9a5ac5","Type":"ContainerStarted","Data":"a4ebf013f8c4eba8643bd428856bdbbf999dab1713eac5bb9e6598c94f3c2a38"} Feb 16 11:18:38 crc kubenswrapper[4949]: I0216 11:18:38.057891 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08zbjq8" event={"ID":"a03698a2-f417-46be-a245-088d7a9a5ac5","Type":"ContainerStarted","Data":"f3585b60652ac56d530af7e1abdba650db63931ca921a354fe199733b8a39432"} Feb 16 11:18:39 crc kubenswrapper[4949]: I0216 11:18:39.068881 4949 generic.go:334] "Generic (PLEG): container finished" podID="a03698a2-f417-46be-a245-088d7a9a5ac5" containerID="a4ebf013f8c4eba8643bd428856bdbbf999dab1713eac5bb9e6598c94f3c2a38" exitCode=0 Feb 16 11:18:39 crc kubenswrapper[4949]: I0216 11:18:39.068988 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08zbjq8" event={"ID":"a03698a2-f417-46be-a245-088d7a9a5ac5","Type":"ContainerDied","Data":"a4ebf013f8c4eba8643bd428856bdbbf999dab1713eac5bb9e6598c94f3c2a38"} Feb 16 11:18:39 crc kubenswrapper[4949]: I0216 11:18:39.072090 4949 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 16 11:18:41 crc kubenswrapper[4949]: I0216 11:18:41.087287 4949 generic.go:334] "Generic (PLEG): container finished" podID="a03698a2-f417-46be-a245-088d7a9a5ac5" containerID="d87fdf97226a734ee829328e94003cc80b02599f9fe89c7453085dd5b55e41ef" exitCode=0 Feb 16 11:18:41 crc kubenswrapper[4949]: I0216 11:18:41.087405 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08zbjq8" event={"ID":"a03698a2-f417-46be-a245-088d7a9a5ac5","Type":"ContainerDied","Data":"d87fdf97226a734ee829328e94003cc80b02599f9fe89c7453085dd5b55e41ef"} Feb 16 11:18:42 crc kubenswrapper[4949]: I0216 11:18:42.100106 4949 generic.go:334] "Generic (PLEG): container finished" podID="a03698a2-f417-46be-a245-088d7a9a5ac5" containerID="db374aeb999a4120137414e787a68515092eb1f06ca805b8ec876c2b6811698a" exitCode=0 Feb 16 11:18:42 crc kubenswrapper[4949]: I0216 11:18:42.100159 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08zbjq8" event={"ID":"a03698a2-f417-46be-a245-088d7a9a5ac5","Type":"ContainerDied","Data":"db374aeb999a4120137414e787a68515092eb1f06ca805b8ec876c2b6811698a"} Feb 16 11:18:43 crc kubenswrapper[4949]: I0216 11:18:43.334893 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08zbjq8" Feb 16 11:18:43 crc kubenswrapper[4949]: I0216 11:18:43.413277 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a03698a2-f417-46be-a245-088d7a9a5ac5-util\") pod \"a03698a2-f417-46be-a245-088d7a9a5ac5\" (UID: \"a03698a2-f417-46be-a245-088d7a9a5ac5\") " Feb 16 11:18:43 crc kubenswrapper[4949]: I0216 11:18:43.413401 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a03698a2-f417-46be-a245-088d7a9a5ac5-bundle\") pod \"a03698a2-f417-46be-a245-088d7a9a5ac5\" (UID: \"a03698a2-f417-46be-a245-088d7a9a5ac5\") " Feb 16 11:18:43 crc kubenswrapper[4949]: I0216 11:18:43.413489 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8ql92\" (UniqueName: \"kubernetes.io/projected/a03698a2-f417-46be-a245-088d7a9a5ac5-kube-api-access-8ql92\") pod \"a03698a2-f417-46be-a245-088d7a9a5ac5\" (UID: \"a03698a2-f417-46be-a245-088d7a9a5ac5\") " Feb 16 11:18:43 crc kubenswrapper[4949]: I0216 11:18:43.415409 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a03698a2-f417-46be-a245-088d7a9a5ac5-bundle" (OuterVolumeSpecName: "bundle") pod "a03698a2-f417-46be-a245-088d7a9a5ac5" (UID: "a03698a2-f417-46be-a245-088d7a9a5ac5"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:18:43 crc kubenswrapper[4949]: I0216 11:18:43.421478 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a03698a2-f417-46be-a245-088d7a9a5ac5-kube-api-access-8ql92" (OuterVolumeSpecName: "kube-api-access-8ql92") pod "a03698a2-f417-46be-a245-088d7a9a5ac5" (UID: "a03698a2-f417-46be-a245-088d7a9a5ac5"). InnerVolumeSpecName "kube-api-access-8ql92". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:18:43 crc kubenswrapper[4949]: I0216 11:18:43.514614 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8ql92\" (UniqueName: \"kubernetes.io/projected/a03698a2-f417-46be-a245-088d7a9a5ac5-kube-api-access-8ql92\") on node \"crc\" DevicePath \"\"" Feb 16 11:18:43 crc kubenswrapper[4949]: I0216 11:18:43.514654 4949 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a03698a2-f417-46be-a245-088d7a9a5ac5-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:18:43 crc kubenswrapper[4949]: I0216 11:18:43.564441 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a03698a2-f417-46be-a245-088d7a9a5ac5-util" (OuterVolumeSpecName: "util") pod "a03698a2-f417-46be-a245-088d7a9a5ac5" (UID: "a03698a2-f417-46be-a245-088d7a9a5ac5"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:18:43 crc kubenswrapper[4949]: I0216 11:18:43.617062 4949 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a03698a2-f417-46be-a245-088d7a9a5ac5-util\") on node \"crc\" DevicePath \"\"" Feb 16 11:18:44 crc kubenswrapper[4949]: I0216 11:18:44.116064 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08zbjq8" event={"ID":"a03698a2-f417-46be-a245-088d7a9a5ac5","Type":"ContainerDied","Data":"f3585b60652ac56d530af7e1abdba650db63931ca921a354fe199733b8a39432"} Feb 16 11:18:44 crc kubenswrapper[4949]: I0216 11:18:44.116217 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f3585b60652ac56d530af7e1abdba650db63931ca921a354fe199733b8a39432" Feb 16 11:18:44 crc kubenswrapper[4949]: I0216 11:18:44.116243 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08zbjq8" Feb 16 11:18:48 crc kubenswrapper[4949]: I0216 11:18:48.458250 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-gfr2q"] Feb 16 11:18:48 crc kubenswrapper[4949]: I0216 11:18:48.459077 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerName="ovn-controller" containerID="cri-o://4c30142bc1aa2477b0821702607a40da4bd125f14ce8bdd4d6a555481f4be477" gracePeriod=30 Feb 16 11:18:48 crc kubenswrapper[4949]: I0216 11:18:48.459178 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerName="nbdb" containerID="cri-o://e903800152f2a61249dd315080aec7aff0fb0f96a2d687f4903ee211ac061080" gracePeriod=30 Feb 16 11:18:48 crc kubenswrapper[4949]: I0216 11:18:48.459228 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerName="sbdb" containerID="cri-o://47947db986c5433367c6b52570a017def2e4babf72f785111587ca86fd745795" gracePeriod=30 Feb 16 11:18:48 crc kubenswrapper[4949]: I0216 11:18:48.459317 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerName="ovn-acl-logging" containerID="cri-o://6614345cd2b9b48d07439200086ab97d851abf34d3fea2b29a5073f224184ed9" gracePeriod=30 Feb 16 11:18:48 crc kubenswrapper[4949]: I0216 11:18:48.459207 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerName="kube-rbac-proxy-node" containerID="cri-o://c16c3ec1c264807a71bba82352d13478aabcbae9eee409569b1de207fd89131c" gracePeriod=30 Feb 16 11:18:48 crc kubenswrapper[4949]: I0216 11:18:48.459190 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerName="northd" containerID="cri-o://b36100d71aa9c4c492c12006bd25f19aa6f8e8b39048b6e895acbadf3a3d5b5c" gracePeriod=30 Feb 16 11:18:48 crc kubenswrapper[4949]: I0216 11:18:48.459190 4949 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://a6b376f8bceb83918f978008daff9cea37a309102416bb94a209bf286a7fe3b4" gracePeriod=30 Feb 16 11:18:48 crc kubenswrapper[4949]: I0216 11:18:48.526754 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerName="ovnkube-controller" containerID="cri-o://54afa3d971f774813afb88c923abd89863a8403b21bd3fdb6da7cd52da107309" gracePeriod=30 Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.147903 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-jsmls_3e42a398-f83a-4463-9ab7-4e093e80d744/kube-multus/2.log" Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.148342 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-jsmls_3e42a398-f83a-4463-9ab7-4e093e80d744/kube-multus/1.log" Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.148399 4949 generic.go:334] "Generic (PLEG): container finished" podID="3e42a398-f83a-4463-9ab7-4e093e80d744" containerID="9fc6653cc53f85a17b85cce7ecc6bfbaf249773cba879f6752ec7d6e9b4cf323" exitCode=2 Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.148461 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-jsmls" event={"ID":"3e42a398-f83a-4463-9ab7-4e093e80d744","Type":"ContainerDied","Data":"9fc6653cc53f85a17b85cce7ecc6bfbaf249773cba879f6752ec7d6e9b4cf323"} Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.148512 4949 scope.go:117] "RemoveContainer" containerID="e24db1cec1807881a31cfb6501695acefa892c247aa78d342eb43cb6a9ed32ee" Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.149286 4949 scope.go:117] "RemoveContainer" containerID="9fc6653cc53f85a17b85cce7ecc6bfbaf249773cba879f6752ec7d6e9b4cf323" Feb 16 11:18:49 crc kubenswrapper[4949]: E0216 11:18:49.149582 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-jsmls_openshift-multus(3e42a398-f83a-4463-9ab7-4e093e80d744)\"" pod="openshift-multus/multus-jsmls" podUID="3e42a398-f83a-4463-9ab7-4e093e80d744"
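The CrashLoopBackOff message above is the kubelet's restart throttle: after each crash, the next restart of the container is delayed by an exponentially growing back-off, which per the kubelet's documented behavior starts at 10s, doubles per restart, and is capped at five minutes (it resets once the container has run cleanly for a while). The "back-off 20s" for kube-multus is therefore the second restart in the current streak. A sketch of that schedule:

    package main

    import (
    	"fmt"
    	"time"
    )

    // backoff returns the delay before the given restart attempt under a
    // 10s-doubling schedule capped at 5m, mirroring the kubelet's
    // CrashLoopBackOff behavior (restarts=1 yields the 20s seen above).
    func backoff(restarts int) time.Duration {
    	d := 10 * time.Second
    	for i := 0; i < restarts; i++ {
    		d *= 2
    		if d >= 5*time.Minute {
    			return 5 * time.Minute
    		}
    	}
    	return d
    }

    func main() {
    	for r := 0; r <= 5; r++ {
    		fmt.Printf("restart %d -> wait %s\n", r, backoff(r))
    	}
    }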
containerID="47947db986c5433367c6b52570a017def2e4babf72f785111587ca86fd745795" exitCode=0 Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.160703 4949 generic.go:334] "Generic (PLEG): container finished" podID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerID="e903800152f2a61249dd315080aec7aff0fb0f96a2d687f4903ee211ac061080" exitCode=0 Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.160714 4949 generic.go:334] "Generic (PLEG): container finished" podID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerID="b36100d71aa9c4c492c12006bd25f19aa6f8e8b39048b6e895acbadf3a3d5b5c" exitCode=0 Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.160724 4949 generic.go:334] "Generic (PLEG): container finished" podID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerID="6614345cd2b9b48d07439200086ab97d851abf34d3fea2b29a5073f224184ed9" exitCode=143 Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.160733 4949 generic.go:334] "Generic (PLEG): container finished" podID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerID="4c30142bc1aa2477b0821702607a40da4bd125f14ce8bdd4d6a555481f4be477" exitCode=143 Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.160765 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" event={"ID":"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d","Type":"ContainerDied","Data":"54afa3d971f774813afb88c923abd89863a8403b21bd3fdb6da7cd52da107309"} Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.160815 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" event={"ID":"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d","Type":"ContainerDied","Data":"47947db986c5433367c6b52570a017def2e4babf72f785111587ca86fd745795"} Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.160834 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" event={"ID":"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d","Type":"ContainerDied","Data":"e903800152f2a61249dd315080aec7aff0fb0f96a2d687f4903ee211ac061080"} Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.160849 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" event={"ID":"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d","Type":"ContainerDied","Data":"b36100d71aa9c4c492c12006bd25f19aa6f8e8b39048b6e895acbadf3a3d5b5c"} Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.160885 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" event={"ID":"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d","Type":"ContainerDied","Data":"6614345cd2b9b48d07439200086ab97d851abf34d3fea2b29a5073f224184ed9"} Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.160906 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" event={"ID":"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d","Type":"ContainerDied","Data":"4c30142bc1aa2477b0821702607a40da4bd125f14ce8bdd4d6a555481f4be477"} Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.171250 4949 scope.go:117] "RemoveContainer" containerID="1bd5bfd89e4acde3a8bfaf0f75ea8b729c29cb7eb095d232605689ae81cca223" Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.735284 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gfr2q_3f545ae8-1b14-4abd-b4ea-844f6ae7b54d/ovn-acl-logging/0.log" Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.736445 4949 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gfr2q_3f545ae8-1b14-4abd-b4ea-844f6ae7b54d/ovn-controller/0.log" Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.737115 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.918404 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-host-var-lib-cni-networks-ovn-kubernetes\") pod \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.918466 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-host-kubelet\") pod \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.918502 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-run-systemd\") pod \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.918604 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-host-cni-bin\") pod \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.918661 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-ovnkube-script-lib\") pod \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.918690 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-run-ovn\") pod \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.918712 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-var-lib-openvswitch\") pod \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.918730 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-host-cni-netd\") pod \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.918751 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-run-openvswitch\") pod \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " Feb 16 
11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.918789 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-ovn-node-metrics-cert\") pod \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.918813 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-ovnkube-config\") pod \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.918810 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" (UID: "3f545ae8-1b14-4abd-b4ea-844f6ae7b54d"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.918837 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-env-overrides\") pod \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.918915 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-host-slash\") pod \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.918947 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-host-run-ovn-kubernetes\") pod \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.918972 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-host-run-netns\") pod \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.918997 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-etc-openvswitch\") pod \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.919049 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qzd7d\" (UniqueName: \"kubernetes.io/projected/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-kube-api-access-qzd7d\") pod \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.919074 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-log-socket\") pod 
\"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.919099 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-systemd-units\") pod \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.919129 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-node-log\") pod \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\" (UID: \"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d\") " Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.919336 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" (UID: "3f545ae8-1b14-4abd-b4ea-844f6ae7b54d"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.919416 4949 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-host-cni-bin\") on node \"crc\" DevicePath \"\"" Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.919441 4949 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-env-overrides\") on node \"crc\" DevicePath \"\"" Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.919477 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" (UID: "3f545ae8-1b14-4abd-b4ea-844f6ae7b54d"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.919510 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" (UID: "3f545ae8-1b14-4abd-b4ea-844f6ae7b54d"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.919608 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" (UID: "3f545ae8-1b14-4abd-b4ea-844f6ae7b54d"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.919657 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" (UID: "3f545ae8-1b14-4abd-b4ea-844f6ae7b54d"). InnerVolumeSpecName "run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.919685 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" (UID: "3f545ae8-1b14-4abd-b4ea-844f6ae7b54d"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.919708 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" (UID: "3f545ae8-1b14-4abd-b4ea-844f6ae7b54d"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.919727 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" (UID: "3f545ae8-1b14-4abd-b4ea-844f6ae7b54d"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.920974 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" (UID: "3f545ae8-1b14-4abd-b4ea-844f6ae7b54d"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.921017 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" (UID: "3f545ae8-1b14-4abd-b4ea-844f6ae7b54d"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.921044 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-host-slash" (OuterVolumeSpecName: "host-slash") pod "3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" (UID: "3f545ae8-1b14-4abd-b4ea-844f6ae7b54d"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.921067 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" (UID: "3f545ae8-1b14-4abd-b4ea-844f6ae7b54d"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.921091 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" (UID: "3f545ae8-1b14-4abd-b4ea-844f6ae7b54d"). 
InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.921113 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-log-socket" (OuterVolumeSpecName: "log-socket") pod "3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" (UID: "3f545ae8-1b14-4abd-b4ea-844f6ae7b54d"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.923338 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-node-log" (OuterVolumeSpecName: "node-log") pod "3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" (UID: "3f545ae8-1b14-4abd-b4ea-844f6ae7b54d"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.923521 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" (UID: "3f545ae8-1b14-4abd-b4ea-844f6ae7b54d"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.930641 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" (UID: "3f545ae8-1b14-4abd-b4ea-844f6ae7b54d"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.930921 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-kube-api-access-qzd7d" (OuterVolumeSpecName: "kube-api-access-qzd7d") pod "3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" (UID: "3f545ae8-1b14-4abd-b4ea-844f6ae7b54d"). InnerVolumeSpecName "kube-api-access-qzd7d". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:18:49 crc kubenswrapper[4949]: I0216 11:18:49.951758 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" (UID: "3f545ae8-1b14-4abd-b4ea-844f6ae7b54d"). InnerVolumeSpecName "run-systemd". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.008025 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-dbvtp"] Feb 16 11:18:50 crc kubenswrapper[4949]: E0216 11:18:50.008324 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerName="kube-rbac-proxy-ovn-metrics" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.008347 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerName="kube-rbac-proxy-ovn-metrics" Feb 16 11:18:50 crc kubenswrapper[4949]: E0216 11:18:50.008360 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerName="kube-rbac-proxy-node" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.008369 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerName="kube-rbac-proxy-node" Feb 16 11:18:50 crc kubenswrapper[4949]: E0216 11:18:50.008383 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a03698a2-f417-46be-a245-088d7a9a5ac5" containerName="pull" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.008392 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="a03698a2-f417-46be-a245-088d7a9a5ac5" containerName="pull" Feb 16 11:18:50 crc kubenswrapper[4949]: E0216 11:18:50.008400 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerName="ovn-acl-logging" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.008408 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerName="ovn-acl-logging" Feb 16 11:18:50 crc kubenswrapper[4949]: E0216 11:18:50.008420 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerName="kubecfg-setup" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.008428 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerName="kubecfg-setup" Feb 16 11:18:50 crc kubenswrapper[4949]: E0216 11:18:50.008438 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerName="ovnkube-controller" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.008447 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerName="ovnkube-controller" Feb 16 11:18:50 crc kubenswrapper[4949]: E0216 11:18:50.008455 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerName="ovnkube-controller" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.008462 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerName="ovnkube-controller" Feb 16 11:18:50 crc kubenswrapper[4949]: E0216 11:18:50.008470 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerName="nbdb" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.008478 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerName="nbdb" Feb 16 11:18:50 crc kubenswrapper[4949]: E0216 11:18:50.008488 4949 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerName="ovn-controller" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.008495 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerName="ovn-controller" Feb 16 11:18:50 crc kubenswrapper[4949]: E0216 11:18:50.008504 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a03698a2-f417-46be-a245-088d7a9a5ac5" containerName="extract" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.008513 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="a03698a2-f417-46be-a245-088d7a9a5ac5" containerName="extract" Feb 16 11:18:50 crc kubenswrapper[4949]: E0216 11:18:50.008525 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerName="sbdb" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.008532 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerName="sbdb" Feb 16 11:18:50 crc kubenswrapper[4949]: E0216 11:18:50.008543 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a03698a2-f417-46be-a245-088d7a9a5ac5" containerName="util" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.008552 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="a03698a2-f417-46be-a245-088d7a9a5ac5" containerName="util" Feb 16 11:18:50 crc kubenswrapper[4949]: E0216 11:18:50.008565 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerName="ovnkube-controller" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.008572 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerName="ovnkube-controller" Feb 16 11:18:50 crc kubenswrapper[4949]: E0216 11:18:50.008582 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerName="northd" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.008590 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerName="northd" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.008717 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerName="ovnkube-controller" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.008728 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="a03698a2-f417-46be-a245-088d7a9a5ac5" containerName="extract" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.008738 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerName="nbdb" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.008746 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerName="ovnkube-controller" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.008753 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerName="kube-rbac-proxy-node" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.008766 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerName="sbdb" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.008776 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" 
containerName="ovn-controller" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.008786 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerName="ovnkube-controller" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.008794 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerName="kube-rbac-proxy-ovn-metrics" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.008805 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerName="northd" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.008816 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerName="ovn-acl-logging" Feb 16 11:18:50 crc kubenswrapper[4949]: E0216 11:18:50.008932 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerName="ovnkube-controller" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.008941 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerName="ovnkube-controller" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.009063 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerName="ovnkube-controller" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.009076 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerName="ovnkube-controller" Feb 16 11:18:50 crc kubenswrapper[4949]: E0216 11:18:50.009238 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerName="ovnkube-controller" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.009248 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerName="ovnkube-controller" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.011407 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.020951 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-ovnkube-script-lib\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.021382 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-ovnkube-config\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.021428 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-etc-openvswitch\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.021455 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-run-openvswitch\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.021509 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-var-lib-openvswitch\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.021548 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-ovn-node-metrics-cert\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.021605 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-host-slash\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.021634 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-host-cni-netd\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.021664 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4bqjq\" (UniqueName: 
\"kubernetes.io/projected/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-kube-api-access-4bqjq\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.021691 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.021717 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-node-log\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.021896 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-run-ovn\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.021977 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-env-overrides\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.022006 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-log-socket\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.022045 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-run-systemd\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.022076 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-host-kubelet\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.022120 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-systemd-units\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.022204 4949 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-host-cni-bin\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.022335 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-host-run-netns\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.022416 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-host-run-ovn-kubernetes\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.022657 4949 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.022725 4949 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-run-ovn\") on node \"crc\" DevicePath \"\"" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.022744 4949 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.022755 4949 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-host-cni-netd\") on node \"crc\" DevicePath \"\"" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.022767 4949 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-run-openvswitch\") on node \"crc\" DevicePath \"\"" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.022777 4949 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.022791 4949 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-ovnkube-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.022803 4949 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-host-slash\") on node \"crc\" DevicePath \"\"" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.022815 4949 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-host-run-ovn-kubernetes\") on node \"crc\" 
DevicePath \"\"" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.022825 4949 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-host-run-netns\") on node \"crc\" DevicePath \"\"" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.022834 4949 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.022848 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qzd7d\" (UniqueName: \"kubernetes.io/projected/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-kube-api-access-qzd7d\") on node \"crc\" DevicePath \"\"" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.022859 4949 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-log-socket\") on node \"crc\" DevicePath \"\"" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.022869 4949 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-systemd-units\") on node \"crc\" DevicePath \"\"" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.022879 4949 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-node-log\") on node \"crc\" DevicePath \"\"" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.022905 4949 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.022919 4949 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-host-kubelet\") on node \"crc\" DevicePath \"\"" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.022930 4949 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d-run-systemd\") on node \"crc\" DevicePath \"\"" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.125072 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-host-kubelet\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.125141 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-systemd-units\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.125184 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-host-cni-bin\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") 
" pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.125227 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-host-run-netns\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.125264 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-host-run-ovn-kubernetes\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.125294 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-ovnkube-script-lib\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.125314 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-ovnkube-config\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.125317 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-systemd-units\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.125387 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-host-run-ovn-kubernetes\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.125429 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-host-run-netns\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.125403 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-etc-openvswitch\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.125338 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-etc-openvswitch\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc 
kubenswrapper[4949]: I0216 11:18:50.125345 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-host-cni-bin\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.125506 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-run-openvswitch\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.125552 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-var-lib-openvswitch\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.125618 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-ovn-node-metrics-cert\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.125657 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-host-slash\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.125676 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-host-cni-netd\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.125703 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4bqjq\" (UniqueName: \"kubernetes.io/projected/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-kube-api-access-4bqjq\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.125730 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.125757 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-node-log\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.125802 
4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-run-ovn\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.125827 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-env-overrides\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.125852 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-log-socket\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.125868 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-run-systemd\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.126006 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-run-systemd\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.126034 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-run-openvswitch\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.126057 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-var-lib-openvswitch\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.126481 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-ovnkube-script-lib\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.126555 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.126591 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" 
(UniqueName: \"kubernetes.io/configmap/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-ovnkube-config\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.126608 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-host-slash\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.126635 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-host-cni-netd\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.126676 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-run-ovn\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.126719 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-node-log\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.127076 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-log-socket\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.127118 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-host-kubelet\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.127136 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-env-overrides\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.131909 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-ovn-node-metrics-cert\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.151197 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4bqjq\" (UniqueName: \"kubernetes.io/projected/7f63377e-69a2-4dee-95d5-6e77a40c2c6e-kube-api-access-4bqjq\") pod \"ovnkube-node-dbvtp\" (UID: \"7f63377e-69a2-4dee-95d5-6e77a40c2c6e\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.168709 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-jsmls_3e42a398-f83a-4463-9ab7-4e093e80d744/kube-multus/2.log" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.174110 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gfr2q_3f545ae8-1b14-4abd-b4ea-844f6ae7b54d/ovn-acl-logging/0.log" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.174643 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gfr2q_3f545ae8-1b14-4abd-b4ea-844f6ae7b54d/ovn-controller/0.log" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.174986 4949 generic.go:334] "Generic (PLEG): container finished" podID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerID="a6b376f8bceb83918f978008daff9cea37a309102416bb94a209bf286a7fe3b4" exitCode=0 Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.175019 4949 generic.go:334] "Generic (PLEG): container finished" podID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" containerID="c16c3ec1c264807a71bba82352d13478aabcbae9eee409569b1de207fd89131c" exitCode=0 Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.175057 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" event={"ID":"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d","Type":"ContainerDied","Data":"a6b376f8bceb83918f978008daff9cea37a309102416bb94a209bf286a7fe3b4"} Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.175098 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" event={"ID":"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d","Type":"ContainerDied","Data":"c16c3ec1c264807a71bba82352d13478aabcbae9eee409569b1de207fd89131c"} Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.175108 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" event={"ID":"3f545ae8-1b14-4abd-b4ea-844f6ae7b54d","Type":"ContainerDied","Data":"a2473bb4251cdf5da2ce75e2bdbb3fdb2cda10e15b5e4becefdd76b7c7f4cf34"} Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.175129 4949 scope.go:117] "RemoveContainer" containerID="54afa3d971f774813afb88c923abd89863a8403b21bd3fdb6da7cd52da107309" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.175345 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-gfr2q" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.204599 4949 scope.go:117] "RemoveContainer" containerID="47947db986c5433367c6b52570a017def2e4babf72f785111587ca86fd745795" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.220197 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-gfr2q"] Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.224977 4949 scope.go:117] "RemoveContainer" containerID="e903800152f2a61249dd315080aec7aff0fb0f96a2d687f4903ee211ac061080" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.233744 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-gfr2q"] Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.244562 4949 scope.go:117] "RemoveContainer" containerID="b36100d71aa9c4c492c12006bd25f19aa6f8e8b39048b6e895acbadf3a3d5b5c" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.263726 4949 scope.go:117] "RemoveContainer" containerID="a6b376f8bceb83918f978008daff9cea37a309102416bb94a209bf286a7fe3b4" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.279819 4949 scope.go:117] "RemoveContainer" containerID="c16c3ec1c264807a71bba82352d13478aabcbae9eee409569b1de207fd89131c" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.303369 4949 scope.go:117] "RemoveContainer" containerID="6614345cd2b9b48d07439200086ab97d851abf34d3fea2b29a5073f224184ed9" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.325706 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.352109 4949 scope.go:117] "RemoveContainer" containerID="4c30142bc1aa2477b0821702607a40da4bd125f14ce8bdd4d6a555481f4be477" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.402129 4949 scope.go:117] "RemoveContainer" containerID="5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.479297 4949 scope.go:117] "RemoveContainer" containerID="54afa3d971f774813afb88c923abd89863a8403b21bd3fdb6da7cd52da107309" Feb 16 11:18:50 crc kubenswrapper[4949]: E0216 11:18:50.482625 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"54afa3d971f774813afb88c923abd89863a8403b21bd3fdb6da7cd52da107309\": container with ID starting with 54afa3d971f774813afb88c923abd89863a8403b21bd3fdb6da7cd52da107309 not found: ID does not exist" containerID="54afa3d971f774813afb88c923abd89863a8403b21bd3fdb6da7cd52da107309" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.482689 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"54afa3d971f774813afb88c923abd89863a8403b21bd3fdb6da7cd52da107309"} err="failed to get container status \"54afa3d971f774813afb88c923abd89863a8403b21bd3fdb6da7cd52da107309\": rpc error: code = NotFound desc = could not find container \"54afa3d971f774813afb88c923abd89863a8403b21bd3fdb6da7cd52da107309\": container with ID starting with 54afa3d971f774813afb88c923abd89863a8403b21bd3fdb6da7cd52da107309 not found: ID does not exist" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.482731 4949 scope.go:117] "RemoveContainer" containerID="47947db986c5433367c6b52570a017def2e4babf72f785111587ca86fd745795" Feb 16 11:18:50 crc kubenswrapper[4949]: E0216 11:18:50.486517 4949 log.go:32] "ContainerStatus from 
runtime service failed" err="rpc error: code = NotFound desc = could not find container \"47947db986c5433367c6b52570a017def2e4babf72f785111587ca86fd745795\": container with ID starting with 47947db986c5433367c6b52570a017def2e4babf72f785111587ca86fd745795 not found: ID does not exist" containerID="47947db986c5433367c6b52570a017def2e4babf72f785111587ca86fd745795" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.486585 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"47947db986c5433367c6b52570a017def2e4babf72f785111587ca86fd745795"} err="failed to get container status \"47947db986c5433367c6b52570a017def2e4babf72f785111587ca86fd745795\": rpc error: code = NotFound desc = could not find container \"47947db986c5433367c6b52570a017def2e4babf72f785111587ca86fd745795\": container with ID starting with 47947db986c5433367c6b52570a017def2e4babf72f785111587ca86fd745795 not found: ID does not exist" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.486628 4949 scope.go:117] "RemoveContainer" containerID="e903800152f2a61249dd315080aec7aff0fb0f96a2d687f4903ee211ac061080" Feb 16 11:18:50 crc kubenswrapper[4949]: E0216 11:18:50.490483 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e903800152f2a61249dd315080aec7aff0fb0f96a2d687f4903ee211ac061080\": container with ID starting with e903800152f2a61249dd315080aec7aff0fb0f96a2d687f4903ee211ac061080 not found: ID does not exist" containerID="e903800152f2a61249dd315080aec7aff0fb0f96a2d687f4903ee211ac061080" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.490531 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e903800152f2a61249dd315080aec7aff0fb0f96a2d687f4903ee211ac061080"} err="failed to get container status \"e903800152f2a61249dd315080aec7aff0fb0f96a2d687f4903ee211ac061080\": rpc error: code = NotFound desc = could not find container \"e903800152f2a61249dd315080aec7aff0fb0f96a2d687f4903ee211ac061080\": container with ID starting with e903800152f2a61249dd315080aec7aff0fb0f96a2d687f4903ee211ac061080 not found: ID does not exist" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.490562 4949 scope.go:117] "RemoveContainer" containerID="b36100d71aa9c4c492c12006bd25f19aa6f8e8b39048b6e895acbadf3a3d5b5c" Feb 16 11:18:50 crc kubenswrapper[4949]: E0216 11:18:50.494875 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b36100d71aa9c4c492c12006bd25f19aa6f8e8b39048b6e895acbadf3a3d5b5c\": container with ID starting with b36100d71aa9c4c492c12006bd25f19aa6f8e8b39048b6e895acbadf3a3d5b5c not found: ID does not exist" containerID="b36100d71aa9c4c492c12006bd25f19aa6f8e8b39048b6e895acbadf3a3d5b5c" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.494940 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b36100d71aa9c4c492c12006bd25f19aa6f8e8b39048b6e895acbadf3a3d5b5c"} err="failed to get container status \"b36100d71aa9c4c492c12006bd25f19aa6f8e8b39048b6e895acbadf3a3d5b5c\": rpc error: code = NotFound desc = could not find container \"b36100d71aa9c4c492c12006bd25f19aa6f8e8b39048b6e895acbadf3a3d5b5c\": container with ID starting with b36100d71aa9c4c492c12006bd25f19aa6f8e8b39048b6e895acbadf3a3d5b5c not found: ID does not exist" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.494988 4949 scope.go:117] "RemoveContainer" 
containerID="a6b376f8bceb83918f978008daff9cea37a309102416bb94a209bf286a7fe3b4" Feb 16 11:18:50 crc kubenswrapper[4949]: E0216 11:18:50.498504 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a6b376f8bceb83918f978008daff9cea37a309102416bb94a209bf286a7fe3b4\": container with ID starting with a6b376f8bceb83918f978008daff9cea37a309102416bb94a209bf286a7fe3b4 not found: ID does not exist" containerID="a6b376f8bceb83918f978008daff9cea37a309102416bb94a209bf286a7fe3b4" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.498546 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a6b376f8bceb83918f978008daff9cea37a309102416bb94a209bf286a7fe3b4"} err="failed to get container status \"a6b376f8bceb83918f978008daff9cea37a309102416bb94a209bf286a7fe3b4\": rpc error: code = NotFound desc = could not find container \"a6b376f8bceb83918f978008daff9cea37a309102416bb94a209bf286a7fe3b4\": container with ID starting with a6b376f8bceb83918f978008daff9cea37a309102416bb94a209bf286a7fe3b4 not found: ID does not exist" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.498578 4949 scope.go:117] "RemoveContainer" containerID="c16c3ec1c264807a71bba82352d13478aabcbae9eee409569b1de207fd89131c" Feb 16 11:18:50 crc kubenswrapper[4949]: E0216 11:18:50.502140 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c16c3ec1c264807a71bba82352d13478aabcbae9eee409569b1de207fd89131c\": container with ID starting with c16c3ec1c264807a71bba82352d13478aabcbae9eee409569b1de207fd89131c not found: ID does not exist" containerID="c16c3ec1c264807a71bba82352d13478aabcbae9eee409569b1de207fd89131c" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.502222 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c16c3ec1c264807a71bba82352d13478aabcbae9eee409569b1de207fd89131c"} err="failed to get container status \"c16c3ec1c264807a71bba82352d13478aabcbae9eee409569b1de207fd89131c\": rpc error: code = NotFound desc = could not find container \"c16c3ec1c264807a71bba82352d13478aabcbae9eee409569b1de207fd89131c\": container with ID starting with c16c3ec1c264807a71bba82352d13478aabcbae9eee409569b1de207fd89131c not found: ID does not exist" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.502262 4949 scope.go:117] "RemoveContainer" containerID="6614345cd2b9b48d07439200086ab97d851abf34d3fea2b29a5073f224184ed9" Feb 16 11:18:50 crc kubenswrapper[4949]: E0216 11:18:50.502702 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6614345cd2b9b48d07439200086ab97d851abf34d3fea2b29a5073f224184ed9\": container with ID starting with 6614345cd2b9b48d07439200086ab97d851abf34d3fea2b29a5073f224184ed9 not found: ID does not exist" containerID="6614345cd2b9b48d07439200086ab97d851abf34d3fea2b29a5073f224184ed9" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.502760 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6614345cd2b9b48d07439200086ab97d851abf34d3fea2b29a5073f224184ed9"} err="failed to get container status \"6614345cd2b9b48d07439200086ab97d851abf34d3fea2b29a5073f224184ed9\": rpc error: code = NotFound desc = could not find container \"6614345cd2b9b48d07439200086ab97d851abf34d3fea2b29a5073f224184ed9\": container with ID starting with 
6614345cd2b9b48d07439200086ab97d851abf34d3fea2b29a5073f224184ed9 not found: ID does not exist" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.502800 4949 scope.go:117] "RemoveContainer" containerID="4c30142bc1aa2477b0821702607a40da4bd125f14ce8bdd4d6a555481f4be477" Feb 16 11:18:50 crc kubenswrapper[4949]: E0216 11:18:50.503138 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4c30142bc1aa2477b0821702607a40da4bd125f14ce8bdd4d6a555481f4be477\": container with ID starting with 4c30142bc1aa2477b0821702607a40da4bd125f14ce8bdd4d6a555481f4be477 not found: ID does not exist" containerID="4c30142bc1aa2477b0821702607a40da4bd125f14ce8bdd4d6a555481f4be477" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.503194 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c30142bc1aa2477b0821702607a40da4bd125f14ce8bdd4d6a555481f4be477"} err="failed to get container status \"4c30142bc1aa2477b0821702607a40da4bd125f14ce8bdd4d6a555481f4be477\": rpc error: code = NotFound desc = could not find container \"4c30142bc1aa2477b0821702607a40da4bd125f14ce8bdd4d6a555481f4be477\": container with ID starting with 4c30142bc1aa2477b0821702607a40da4bd125f14ce8bdd4d6a555481f4be477 not found: ID does not exist" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.503214 4949 scope.go:117] "RemoveContainer" containerID="5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a" Feb 16 11:18:50 crc kubenswrapper[4949]: E0216 11:18:50.503522 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\": container with ID starting with 5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a not found: ID does not exist" containerID="5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.503570 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a"} err="failed to get container status \"5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\": rpc error: code = NotFound desc = could not find container \"5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\": container with ID starting with 5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a not found: ID does not exist" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.503598 4949 scope.go:117] "RemoveContainer" containerID="54afa3d971f774813afb88c923abd89863a8403b21bd3fdb6da7cd52da107309" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.507733 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"54afa3d971f774813afb88c923abd89863a8403b21bd3fdb6da7cd52da107309"} err="failed to get container status \"54afa3d971f774813afb88c923abd89863a8403b21bd3fdb6da7cd52da107309\": rpc error: code = NotFound desc = could not find container \"54afa3d971f774813afb88c923abd89863a8403b21bd3fdb6da7cd52da107309\": container with ID starting with 54afa3d971f774813afb88c923abd89863a8403b21bd3fdb6da7cd52da107309 not found: ID does not exist" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.507790 4949 scope.go:117] "RemoveContainer" containerID="47947db986c5433367c6b52570a017def2e4babf72f785111587ca86fd745795" Feb 16 11:18:50 crc 
kubenswrapper[4949]: I0216 11:18:50.508148 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"47947db986c5433367c6b52570a017def2e4babf72f785111587ca86fd745795"} err="failed to get container status \"47947db986c5433367c6b52570a017def2e4babf72f785111587ca86fd745795\": rpc error: code = NotFound desc = could not find container \"47947db986c5433367c6b52570a017def2e4babf72f785111587ca86fd745795\": container with ID starting with 47947db986c5433367c6b52570a017def2e4babf72f785111587ca86fd745795 not found: ID does not exist" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.508217 4949 scope.go:117] "RemoveContainer" containerID="e903800152f2a61249dd315080aec7aff0fb0f96a2d687f4903ee211ac061080" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.508488 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e903800152f2a61249dd315080aec7aff0fb0f96a2d687f4903ee211ac061080"} err="failed to get container status \"e903800152f2a61249dd315080aec7aff0fb0f96a2d687f4903ee211ac061080\": rpc error: code = NotFound desc = could not find container \"e903800152f2a61249dd315080aec7aff0fb0f96a2d687f4903ee211ac061080\": container with ID starting with e903800152f2a61249dd315080aec7aff0fb0f96a2d687f4903ee211ac061080 not found: ID does not exist" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.508516 4949 scope.go:117] "RemoveContainer" containerID="b36100d71aa9c4c492c12006bd25f19aa6f8e8b39048b6e895acbadf3a3d5b5c" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.508875 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b36100d71aa9c4c492c12006bd25f19aa6f8e8b39048b6e895acbadf3a3d5b5c"} err="failed to get container status \"b36100d71aa9c4c492c12006bd25f19aa6f8e8b39048b6e895acbadf3a3d5b5c\": rpc error: code = NotFound desc = could not find container \"b36100d71aa9c4c492c12006bd25f19aa6f8e8b39048b6e895acbadf3a3d5b5c\": container with ID starting with b36100d71aa9c4c492c12006bd25f19aa6f8e8b39048b6e895acbadf3a3d5b5c not found: ID does not exist" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.508899 4949 scope.go:117] "RemoveContainer" containerID="a6b376f8bceb83918f978008daff9cea37a309102416bb94a209bf286a7fe3b4" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.510682 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a6b376f8bceb83918f978008daff9cea37a309102416bb94a209bf286a7fe3b4"} err="failed to get container status \"a6b376f8bceb83918f978008daff9cea37a309102416bb94a209bf286a7fe3b4\": rpc error: code = NotFound desc = could not find container \"a6b376f8bceb83918f978008daff9cea37a309102416bb94a209bf286a7fe3b4\": container with ID starting with a6b376f8bceb83918f978008daff9cea37a309102416bb94a209bf286a7fe3b4 not found: ID does not exist" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.510737 4949 scope.go:117] "RemoveContainer" containerID="c16c3ec1c264807a71bba82352d13478aabcbae9eee409569b1de207fd89131c" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.512456 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c16c3ec1c264807a71bba82352d13478aabcbae9eee409569b1de207fd89131c"} err="failed to get container status \"c16c3ec1c264807a71bba82352d13478aabcbae9eee409569b1de207fd89131c\": rpc error: code = NotFound desc = could not find container \"c16c3ec1c264807a71bba82352d13478aabcbae9eee409569b1de207fd89131c\": container with ID 
starting with c16c3ec1c264807a71bba82352d13478aabcbae9eee409569b1de207fd89131c not found: ID does not exist" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.512485 4949 scope.go:117] "RemoveContainer" containerID="6614345cd2b9b48d07439200086ab97d851abf34d3fea2b29a5073f224184ed9" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.513390 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6614345cd2b9b48d07439200086ab97d851abf34d3fea2b29a5073f224184ed9"} err="failed to get container status \"6614345cd2b9b48d07439200086ab97d851abf34d3fea2b29a5073f224184ed9\": rpc error: code = NotFound desc = could not find container \"6614345cd2b9b48d07439200086ab97d851abf34d3fea2b29a5073f224184ed9\": container with ID starting with 6614345cd2b9b48d07439200086ab97d851abf34d3fea2b29a5073f224184ed9 not found: ID does not exist" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.513419 4949 scope.go:117] "RemoveContainer" containerID="4c30142bc1aa2477b0821702607a40da4bd125f14ce8bdd4d6a555481f4be477" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.513841 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c30142bc1aa2477b0821702607a40da4bd125f14ce8bdd4d6a555481f4be477"} err="failed to get container status \"4c30142bc1aa2477b0821702607a40da4bd125f14ce8bdd4d6a555481f4be477\": rpc error: code = NotFound desc = could not find container \"4c30142bc1aa2477b0821702607a40da4bd125f14ce8bdd4d6a555481f4be477\": container with ID starting with 4c30142bc1aa2477b0821702607a40da4bd125f14ce8bdd4d6a555481f4be477 not found: ID does not exist" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.513898 4949 scope.go:117] "RemoveContainer" containerID="5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a" Feb 16 11:18:50 crc kubenswrapper[4949]: I0216 11:18:50.514231 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a"} err="failed to get container status \"5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\": rpc error: code = NotFound desc = could not find container \"5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a\": container with ID starting with 5f8cb34c9f41299ea21b00fdebe50008097d37f17b80085c93892bb5b98b153a not found: ID does not exist" Feb 16 11:18:51 crc kubenswrapper[4949]: I0216 11:18:51.183515 4949 generic.go:334] "Generic (PLEG): container finished" podID="7f63377e-69a2-4dee-95d5-6e77a40c2c6e" containerID="1b5c7996362e8b6505c4d78c1785c545c403f0e33edf5ed49cef9ece467818a3" exitCode=0 Feb 16 11:18:51 crc kubenswrapper[4949]: I0216 11:18:51.184052 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" event={"ID":"7f63377e-69a2-4dee-95d5-6e77a40c2c6e","Type":"ContainerDied","Data":"1b5c7996362e8b6505c4d78c1785c545c403f0e33edf5ed49cef9ece467818a3"} Feb 16 11:18:51 crc kubenswrapper[4949]: I0216 11:18:51.184087 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" event={"ID":"7f63377e-69a2-4dee-95d5-6e77a40c2c6e","Type":"ContainerStarted","Data":"8f55ed55ebb14237d2c342c30efa24b9a76c2f0a4833728743942d0575dc41a9"} Feb 16 11:18:51 crc kubenswrapper[4949]: I0216 11:18:51.268365 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3f545ae8-1b14-4abd-b4ea-844f6ae7b54d" 
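path="/var/lib/kubelet/pods/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d/volumes"

The RemoveContainer / "DeleteContainer returned error" cycle above is the kubelet garbage-collecting containers that CRI-O has already purged: ContainerStatus comes back with gRPC NotFound, and the deletor logs the error and moves on instead of failing the sync, so the repeated "ID does not exist" entries are benign. Below is a minimal Python sketch of that idempotent-removal pattern; RuntimeClient and NotFoundError are illustrative stand-ins, not the real CRI client API.

class NotFoundError(Exception):
    """Stands in for a gRPC NotFound status from the container runtime."""

class RuntimeClient:
    """Toy runtime holding a set of live container IDs."""
    def __init__(self, containers):
        self._containers = set(containers)

    def container_status(self, container_id):
        if container_id not in self._containers:
            raise NotFoundError(f"could not find container {container_id!r}")
        return {"id": container_id, "state": "exited"}

    def remove_container(self, container_id):
        self._containers.discard(container_id)

def remove_if_present(client, container_id):
    """Idempotent removal: treat NotFound as success, mirroring the log above."""
    try:
        client.container_status(container_id)
    except NotFoundError:
        # Matches "DeleteContainer returned error ... ID does not exist":
        # the container is already gone, so there is nothing left to do.
        return False
    client.remove_container(container_id)
    return True

client = RuntimeClient({"47947db9"})
print(remove_if_present(client, "47947db9"))  # True: removed on this pass
print(remove_if_present(client, "47947db9"))  # False: already gone, not an error
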
path="/var/lib/kubelet/pods/3f545ae8-1b14-4abd-b4ea-844f6ae7b54d/volumes" Feb 16 11:18:52 crc kubenswrapper[4949]: I0216 11:18:52.195599 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" event={"ID":"7f63377e-69a2-4dee-95d5-6e77a40c2c6e","Type":"ContainerStarted","Data":"5c70160c9268fa1fdd68580f4c181bcc9a3cf3e1415d2b1b3feb4d3b2da78617"} Feb 16 11:18:52 crc kubenswrapper[4949]: I0216 11:18:52.195868 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" event={"ID":"7f63377e-69a2-4dee-95d5-6e77a40c2c6e","Type":"ContainerStarted","Data":"14f7c65857f79bb757e5acce418bcea2b3a90847068fe97cf29a0a9fbeedae62"} Feb 16 11:18:52 crc kubenswrapper[4949]: I0216 11:18:52.195883 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" event={"ID":"7f63377e-69a2-4dee-95d5-6e77a40c2c6e","Type":"ContainerStarted","Data":"b5f7b9e09077b0c433f51c9e3e105f69ea2aea94ed4d558f203fc33b30910517"} Feb 16 11:18:52 crc kubenswrapper[4949]: I0216 11:18:52.195896 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" event={"ID":"7f63377e-69a2-4dee-95d5-6e77a40c2c6e","Type":"ContainerStarted","Data":"8025d9602235fe307503feb3a70e6b8b81eb3dff32d86da09b9e0d171763a94c"} Feb 16 11:18:52 crc kubenswrapper[4949]: I0216 11:18:52.195907 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" event={"ID":"7f63377e-69a2-4dee-95d5-6e77a40c2c6e","Type":"ContainerStarted","Data":"32cdd9ed49596473e83d2577547c9cdcaf7fd6b1bcbb2d8f8a81cb8f0cdd1722"} Feb 16 11:18:52 crc kubenswrapper[4949]: I0216 11:18:52.195918 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" event={"ID":"7f63377e-69a2-4dee-95d5-6e77a40c2c6e","Type":"ContainerStarted","Data":"2e5caa33dee8461a6588db049cd5d5e6dd8ef29a622acf9c1771c77d0d545f73"} Feb 16 11:18:52 crc kubenswrapper[4949]: I0216 11:18:52.543878 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-68bc856cb9-zgpcq"] Feb 16 11:18:52 crc kubenswrapper[4949]: I0216 11:18:52.544962 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-zgpcq" Feb 16 11:18:52 crc kubenswrapper[4949]: I0216 11:18:52.547606 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"openshift-service-ca.crt" Feb 16 11:18:52 crc kubenswrapper[4949]: I0216 11:18:52.547663 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"kube-root-ca.crt" Feb 16 11:18:52 crc kubenswrapper[4949]: I0216 11:18:52.547732 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-dockercfg-xx7p8" Feb 16 11:18:52 crc kubenswrapper[4949]: I0216 11:18:52.586519 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j8k56\" (UniqueName: \"kubernetes.io/projected/f5bc5773-497e-40be-ba68-82d49b1fd949-kube-api-access-j8k56\") pod \"obo-prometheus-operator-68bc856cb9-zgpcq\" (UID: \"f5bc5773-497e-40be-ba68-82d49b1fd949\") " pod="openshift-operators/obo-prometheus-operator-68bc856cb9-zgpcq" Feb 16 11:18:52 crc kubenswrapper[4949]: I0216 11:18:52.674700 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl"] Feb 16 11:18:52 crc kubenswrapper[4949]: I0216 11:18:52.676114 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl" Feb 16 11:18:52 crc kubenswrapper[4949]: I0216 11:18:52.679439 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-dockercfg-fp52s" Feb 16 11:18:52 crc kubenswrapper[4949]: I0216 11:18:52.683692 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-service-cert" Feb 16 11:18:52 crc kubenswrapper[4949]: I0216 11:18:52.687948 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j8k56\" (UniqueName: \"kubernetes.io/projected/f5bc5773-497e-40be-ba68-82d49b1fd949-kube-api-access-j8k56\") pod \"obo-prometheus-operator-68bc856cb9-zgpcq\" (UID: \"f5bc5773-497e-40be-ba68-82d49b1fd949\") " pod="openshift-operators/obo-prometheus-operator-68bc856cb9-zgpcq" Feb 16 11:18:52 crc kubenswrapper[4949]: I0216 11:18:52.707301 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq"] Feb 16 11:18:52 crc kubenswrapper[4949]: I0216 11:18:52.708552 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq" Feb 16 11:18:52 crc kubenswrapper[4949]: I0216 11:18:52.716883 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j8k56\" (UniqueName: \"kubernetes.io/projected/f5bc5773-497e-40be-ba68-82d49b1fd949-kube-api-access-j8k56\") pod \"obo-prometheus-operator-68bc856cb9-zgpcq\" (UID: \"f5bc5773-497e-40be-ba68-82d49b1fd949\") " pod="openshift-operators/obo-prometheus-operator-68bc856cb9-zgpcq" Feb 16 11:18:52 crc kubenswrapper[4949]: I0216 11:18:52.789561 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/3e0ff572-a323-409b-be25-ad0bceff59a5-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl\" (UID: \"3e0ff572-a323-409b-be25-ad0bceff59a5\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl" Feb 16 11:18:52 crc kubenswrapper[4949]: I0216 11:18:52.789606 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/e21a80a9-650f-42d0-9cd4-6aaa334423a3-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq\" (UID: \"e21a80a9-650f-42d0-9cd4-6aaa334423a3\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq" Feb 16 11:18:52 crc kubenswrapper[4949]: I0216 11:18:52.789631 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/e21a80a9-650f-42d0-9cd4-6aaa334423a3-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq\" (UID: \"e21a80a9-650f-42d0-9cd4-6aaa334423a3\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq" Feb 16 11:18:52 crc kubenswrapper[4949]: I0216 11:18:52.789676 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/3e0ff572-a323-409b-be25-ad0bceff59a5-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl\" (UID: \"3e0ff572-a323-409b-be25-ad0bceff59a5\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl" Feb 16 11:18:52 crc kubenswrapper[4949]: I0216 11:18:52.859645 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-zgpcq" Feb 16 11:18:52 crc kubenswrapper[4949]: I0216 11:18:52.874311 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-operator-59bdc8b94-sr5rv"] Feb 16 11:18:52 crc kubenswrapper[4949]: I0216 11:18:52.875157 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-sr5rv" Feb 16 11:18:52 crc kubenswrapper[4949]: I0216 11:18:52.877661 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-tls" Feb 16 11:18:52 crc kubenswrapper[4949]: I0216 11:18:52.877912 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-sa-dockercfg-v7pd8" Feb 16 11:18:52 crc kubenswrapper[4949]: I0216 11:18:52.891384 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/e21a80a9-650f-42d0-9cd4-6aaa334423a3-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq\" (UID: \"e21a80a9-650f-42d0-9cd4-6aaa334423a3\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq" Feb 16 11:18:52 crc kubenswrapper[4949]: I0216 11:18:52.891776 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nsg77\" (UniqueName: \"kubernetes.io/projected/0857cd6c-04ee-449a-88ed-99093185d7f5-kube-api-access-nsg77\") pod \"observability-operator-59bdc8b94-sr5rv\" (UID: \"0857cd6c-04ee-449a-88ed-99093185d7f5\") " pod="openshift-operators/observability-operator-59bdc8b94-sr5rv" Feb 16 11:18:52 crc kubenswrapper[4949]: I0216 11:18:52.891839 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/3e0ff572-a323-409b-be25-ad0bceff59a5-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl\" (UID: \"3e0ff572-a323-409b-be25-ad0bceff59a5\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl" Feb 16 11:18:52 crc kubenswrapper[4949]: I0216 11:18:52.891900 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/0857cd6c-04ee-449a-88ed-99093185d7f5-observability-operator-tls\") pod \"observability-operator-59bdc8b94-sr5rv\" (UID: \"0857cd6c-04ee-449a-88ed-99093185d7f5\") " pod="openshift-operators/observability-operator-59bdc8b94-sr5rv" Feb 16 11:18:52 crc kubenswrapper[4949]: I0216 11:18:52.891926 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/3e0ff572-a323-409b-be25-ad0bceff59a5-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl\" (UID: \"3e0ff572-a323-409b-be25-ad0bceff59a5\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl" Feb 16 11:18:52 crc kubenswrapper[4949]: I0216 11:18:52.891942 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/e21a80a9-650f-42d0-9cd4-6aaa334423a3-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq\" (UID: \"e21a80a9-650f-42d0-9cd4-6aaa334423a3\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq" Feb 16 11:18:52 crc kubenswrapper[4949]: E0216 11:18:52.894516 4949 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox 
k8s_obo-prometheus-operator-68bc856cb9-zgpcq_openshift-operators_f5bc5773-497e-40be-ba68-82d49b1fd949_0(85e7168023236a46e2379d73c5ed90b582815e7c1492304a336d4ef8186435f6): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Feb 16 11:18:52 crc kubenswrapper[4949]: E0216 11:18:52.894583 4949 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-zgpcq_openshift-operators_f5bc5773-497e-40be-ba68-82d49b1fd949_0(85e7168023236a46e2379d73c5ed90b582815e7c1492304a336d4ef8186435f6): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-zgpcq" Feb 16 11:18:52 crc kubenswrapper[4949]: E0216 11:18:52.894604 4949 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-zgpcq_openshift-operators_f5bc5773-497e-40be-ba68-82d49b1fd949_0(85e7168023236a46e2379d73c5ed90b582815e7c1492304a336d4ef8186435f6): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-zgpcq" Feb 16 11:18:52 crc kubenswrapper[4949]: E0216 11:18:52.894644 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-68bc856cb9-zgpcq_openshift-operators(f5bc5773-497e-40be-ba68-82d49b1fd949)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-68bc856cb9-zgpcq_openshift-operators(f5bc5773-497e-40be-ba68-82d49b1fd949)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-zgpcq_openshift-operators_f5bc5773-497e-40be-ba68-82d49b1fd949_0(85e7168023236a46e2379d73c5ed90b582815e7c1492304a336d4ef8186435f6): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-zgpcq" podUID="f5bc5773-497e-40be-ba68-82d49b1fd949" Feb 16 11:18:52 crc kubenswrapper[4949]: I0216 11:18:52.896346 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/3e0ff572-a323-409b-be25-ad0bceff59a5-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl\" (UID: \"3e0ff572-a323-409b-be25-ad0bceff59a5\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl" Feb 16 11:18:52 crc kubenswrapper[4949]: I0216 11:18:52.899842 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/3e0ff572-a323-409b-be25-ad0bceff59a5-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl\" (UID: \"3e0ff572-a323-409b-be25-ad0bceff59a5\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl" Feb 16 11:18:52 crc kubenswrapper[4949]: I0216 11:18:52.899869 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/e21a80a9-650f-42d0-9cd4-6aaa334423a3-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq\" (UID: \"e21a80a9-650f-42d0-9cd4-6aaa334423a3\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq" Feb 16 11:18:52 crc kubenswrapper[4949]: I0216 11:18:52.909739 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/e21a80a9-650f-42d0-9cd4-6aaa334423a3-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq\" (UID: \"e21a80a9-650f-42d0-9cd4-6aaa334423a3\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq" Feb 16 11:18:52 crc kubenswrapper[4949]: I0216 11:18:52.978913 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/perses-operator-5bf474d74f-2l4xd"] Feb 16 11:18:52 crc kubenswrapper[4949]: I0216 11:18:52.979744 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-2l4xd" Feb 16 11:18:52 crc kubenswrapper[4949]: I0216 11:18:52.981461 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"perses-operator-dockercfg-gscrw" Feb 16 11:18:52 crc kubenswrapper[4949]: I0216 11:18:52.993166 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/0857cd6c-04ee-449a-88ed-99093185d7f5-observability-operator-tls\") pod \"observability-operator-59bdc8b94-sr5rv\" (UID: \"0857cd6c-04ee-449a-88ed-99093185d7f5\") " pod="openshift-operators/observability-operator-59bdc8b94-sr5rv" Feb 16 11:18:52 crc kubenswrapper[4949]: I0216 11:18:52.993244 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nsg77\" (UniqueName: \"kubernetes.io/projected/0857cd6c-04ee-449a-88ed-99093185d7f5-kube-api-access-nsg77\") pod \"observability-operator-59bdc8b94-sr5rv\" (UID: \"0857cd6c-04ee-449a-88ed-99093185d7f5\") " pod="openshift-operators/observability-operator-59bdc8b94-sr5rv" Feb 16 11:18:52 crc kubenswrapper[4949]: I0216 11:18:52.993277 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jjwgr\" (UniqueName: \"kubernetes.io/projected/cf4e8b11-ca69-4d30-97d8-935339316048-kube-api-access-jjwgr\") pod \"perses-operator-5bf474d74f-2l4xd\" (UID: \"cf4e8b11-ca69-4d30-97d8-935339316048\") " pod="openshift-operators/perses-operator-5bf474d74f-2l4xd" Feb 16 11:18:52 crc kubenswrapper[4949]: I0216 11:18:52.993310 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/cf4e8b11-ca69-4d30-97d8-935339316048-openshift-service-ca\") pod \"perses-operator-5bf474d74f-2l4xd\" (UID: \"cf4e8b11-ca69-4d30-97d8-935339316048\") " pod="openshift-operators/perses-operator-5bf474d74f-2l4xd" Feb 16 11:18:52 crc kubenswrapper[4949]: I0216 11:18:52.997560 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/0857cd6c-04ee-449a-88ed-99093185d7f5-observability-operator-tls\") pod \"observability-operator-59bdc8b94-sr5rv\" (UID: \"0857cd6c-04ee-449a-88ed-99093185d7f5\") " pod="openshift-operators/observability-operator-59bdc8b94-sr5rv" Feb 16 11:18:52 crc kubenswrapper[4949]: I0216 11:18:52.998124 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl" Feb 16 11:18:53 crc kubenswrapper[4949]: E0216 11:18:53.024463 4949 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl_openshift-operators_3e0ff572-a323-409b-be25-ad0bceff59a5_0(2e9f5fdb03379702ee62c493a5d3a54d27bd02256860ac7cfb0964acce4935bd): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
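
Every sandbox failure in this burst has the same root cause: CRI-O finds no CNI network config in /etc/kubernetes/cni/net.d/, so RunPodSandbox is rejected until the network provider writes one. A quick diagnostic sketch under that assumption follows; the directory path is taken from the log message itself, and the extension filter follows the usual CNI conventions (.conf/.conflist/.json).

import os

CNI_CONF_DIR = "/etc/kubernetes/cni/net.d"

def cni_configs(conf_dir=CNI_CONF_DIR):
    """List CNI network config files, or [] if none (or no directory) exist."""
    try:
        return [e for e in os.listdir(conf_dir)
                if e.endswith((".conf", ".conflist", ".json"))]
    except FileNotFoundError:
        return []

found = cni_configs()
if found:
    print("CNI config present:", found)
else:
    print("no CNI configuration file -- sandbox creation will keep failing")
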
Feb 16 11:18:53 crc kubenswrapper[4949]: E0216 11:18:53.024546 4949 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl_openshift-operators_3e0ff572-a323-409b-be25-ad0bceff59a5_0(2e9f5fdb03379702ee62c493a5d3a54d27bd02256860ac7cfb0964acce4935bd): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl" Feb 16 11:18:53 crc kubenswrapper[4949]: E0216 11:18:53.024577 4949 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl_openshift-operators_3e0ff572-a323-409b-be25-ad0bceff59a5_0(2e9f5fdb03379702ee62c493a5d3a54d27bd02256860ac7cfb0964acce4935bd): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl" Feb 16 11:18:53 crc kubenswrapper[4949]: I0216 11:18:53.024788 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nsg77\" (UniqueName: \"kubernetes.io/projected/0857cd6c-04ee-449a-88ed-99093185d7f5-kube-api-access-nsg77\") pod \"observability-operator-59bdc8b94-sr5rv\" (UID: \"0857cd6c-04ee-449a-88ed-99093185d7f5\") " pod="openshift-operators/observability-operator-59bdc8b94-sr5rv" Feb 16 11:18:53 crc kubenswrapper[4949]: E0216 11:18:53.024653 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl_openshift-operators(3e0ff572-a323-409b-be25-ad0bceff59a5)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl_openshift-operators(3e0ff572-a323-409b-be25-ad0bceff59a5)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl_openshift-operators_3e0ff572-a323-409b-be25-ad0bceff59a5_0(2e9f5fdb03379702ee62c493a5d3a54d27bd02256860ac7cfb0964acce4935bd): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl" podUID="3e0ff572-a323-409b-be25-ad0bceff59a5" Feb 16 11:18:53 crc kubenswrapper[4949]: I0216 11:18:53.048515 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq" Feb 16 11:18:53 crc kubenswrapper[4949]: E0216 11:18:53.067964 4949 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq_openshift-operators_e21a80a9-650f-42d0-9cd4-6aaa334423a3_0(ada4e7703078c98bdb951655e194b57c3f57fdc2ceb3370ba62dd28e8ee7212b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
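
Note that every retry allocates a fresh pause-sandbox ID (85e71680..., 2e9f5fdb..., now ada4e770..., and new IDs again on the 11:18:57 retries below), so one underlying condition produces many distinct-looking errors. When triaging a log like this it is usually more useful to count CreatePodSandbox failures per pod than to read each entry; here is a small sketch, assuming the journal text has been saved one entry per line as kubelet.log (a hypothetical filename).

import re
from collections import Counter

# The err text never contains 'pod="', so the non-greedy skip stops at the
# trailing pod attribute of each CreatePodSandbox failure entry.
PATTERN = re.compile(r'"CreatePodSandbox for pod failed".*?pod="([^"]+)"')

failures = Counter()
with open("kubelet.log", encoding="utf-8") as log:
    for line in log:
        for pod in PATTERN.findall(line):
            failures[pod] += 1

for pod, count in failures.most_common():
    print(f"{count:3d}  {pod}")
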
Feb 16 11:18:53 crc kubenswrapper[4949]: E0216 11:18:53.068048 4949 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq_openshift-operators_e21a80a9-650f-42d0-9cd4-6aaa334423a3_0(ada4e7703078c98bdb951655e194b57c3f57fdc2ceb3370ba62dd28e8ee7212b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq" Feb 16 11:18:53 crc kubenswrapper[4949]: E0216 11:18:53.068072 4949 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq_openshift-operators_e21a80a9-650f-42d0-9cd4-6aaa334423a3_0(ada4e7703078c98bdb951655e194b57c3f57fdc2ceb3370ba62dd28e8ee7212b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq" Feb 16 11:18:53 crc kubenswrapper[4949]: E0216 11:18:53.068131 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq_openshift-operators(e21a80a9-650f-42d0-9cd4-6aaa334423a3)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq_openshift-operators(e21a80a9-650f-42d0-9cd4-6aaa334423a3)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq_openshift-operators_e21a80a9-650f-42d0-9cd4-6aaa334423a3_0(ada4e7703078c98bdb951655e194b57c3f57fdc2ceb3370ba62dd28e8ee7212b): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq" podUID="e21a80a9-650f-42d0-9cd4-6aaa334423a3" Feb 16 11:18:53 crc kubenswrapper[4949]: I0216 11:18:53.094460 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jjwgr\" (UniqueName: \"kubernetes.io/projected/cf4e8b11-ca69-4d30-97d8-935339316048-kube-api-access-jjwgr\") pod \"perses-operator-5bf474d74f-2l4xd\" (UID: \"cf4e8b11-ca69-4d30-97d8-935339316048\") " pod="openshift-operators/perses-operator-5bf474d74f-2l4xd" Feb 16 11:18:53 crc kubenswrapper[4949]: I0216 11:18:53.094528 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/cf4e8b11-ca69-4d30-97d8-935339316048-openshift-service-ca\") pod \"perses-operator-5bf474d74f-2l4xd\" (UID: \"cf4e8b11-ca69-4d30-97d8-935339316048\") " pod="openshift-operators/perses-operator-5bf474d74f-2l4xd" Feb 16 11:18:53 crc kubenswrapper[4949]: I0216 11:18:53.095670 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/cf4e8b11-ca69-4d30-97d8-935339316048-openshift-service-ca\") pod \"perses-operator-5bf474d74f-2l4xd\" (UID: \"cf4e8b11-ca69-4d30-97d8-935339316048\") " pod="openshift-operators/perses-operator-5bf474d74f-2l4xd" Feb 16 11:18:53 crc kubenswrapper[4949]: I0216 11:18:53.112584 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jjwgr\" (UniqueName: \"kubernetes.io/projected/cf4e8b11-ca69-4d30-97d8-935339316048-kube-api-access-jjwgr\") pod \"perses-operator-5bf474d74f-2l4xd\" (UID: \"cf4e8b11-ca69-4d30-97d8-935339316048\") " pod="openshift-operators/perses-operator-5bf474d74f-2l4xd" Feb 16 11:18:53 crc kubenswrapper[4949]: I0216 11:18:53.261270 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-sr5rv" Feb 16 11:18:53 crc kubenswrapper[4949]: E0216 11:18:53.280117 4949 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-sr5rv_openshift-operators_0857cd6c-04ee-449a-88ed-99093185d7f5_0(9a103d326ab1ac60c9db7910a0594fbc11456989de7c4385011a407bd6c19dbb): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Feb 16 11:18:53 crc kubenswrapper[4949]: E0216 11:18:53.280195 4949 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-sr5rv_openshift-operators_0857cd6c-04ee-449a-88ed-99093185d7f5_0(9a103d326ab1ac60c9db7910a0594fbc11456989de7c4385011a407bd6c19dbb): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-59bdc8b94-sr5rv" Feb 16 11:18:53 crc kubenswrapper[4949]: E0216 11:18:53.280217 4949 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-sr5rv_openshift-operators_0857cd6c-04ee-449a-88ed-99093185d7f5_0(9a103d326ab1ac60c9db7910a0594fbc11456989de7c4385011a407bd6c19dbb): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/observability-operator-59bdc8b94-sr5rv" Feb 16 11:18:53 crc kubenswrapper[4949]: E0216 11:18:53.280267 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"observability-operator-59bdc8b94-sr5rv_openshift-operators(0857cd6c-04ee-449a-88ed-99093185d7f5)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"observability-operator-59bdc8b94-sr5rv_openshift-operators(0857cd6c-04ee-449a-88ed-99093185d7f5)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-sr5rv_openshift-operators_0857cd6c-04ee-449a-88ed-99093185d7f5_0(9a103d326ab1ac60c9db7910a0594fbc11456989de7c4385011a407bd6c19dbb): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/observability-operator-59bdc8b94-sr5rv" podUID="0857cd6c-04ee-449a-88ed-99093185d7f5" Feb 16 11:18:53 crc kubenswrapper[4949]: I0216 11:18:53.293929 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-2l4xd" Feb 16 11:18:53 crc kubenswrapper[4949]: E0216 11:18:53.318793 4949 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-2l4xd_openshift-operators_cf4e8b11-ca69-4d30-97d8-935339316048_0(5962f30dbc33b8c7ccb530006244b2693cd370d662e17bd0aaf8208fa27ea473): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Feb 16 11:18:53 crc kubenswrapper[4949]: E0216 11:18:53.318860 4949 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-2l4xd_openshift-operators_cf4e8b11-ca69-4d30-97d8-935339316048_0(5962f30dbc33b8c7ccb530006244b2693cd370d662e17bd0aaf8208fa27ea473): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5bf474d74f-2l4xd" Feb 16 11:18:53 crc kubenswrapper[4949]: E0216 11:18:53.318883 4949 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-2l4xd_openshift-operators_cf4e8b11-ca69-4d30-97d8-935339316048_0(5962f30dbc33b8c7ccb530006244b2693cd370d662e17bd0aaf8208fa27ea473): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5bf474d74f-2l4xd" Feb 16 11:18:53 crc kubenswrapper[4949]: E0216 11:18:53.318926 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"perses-operator-5bf474d74f-2l4xd_openshift-operators(cf4e8b11-ca69-4d30-97d8-935339316048)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"perses-operator-5bf474d74f-2l4xd_openshift-operators(cf4e8b11-ca69-4d30-97d8-935339316048)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-2l4xd_openshift-operators_cf4e8b11-ca69-4d30-97d8-935339316048_0(5962f30dbc33b8c7ccb530006244b2693cd370d662e17bd0aaf8208fa27ea473): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="openshift-operators/perses-operator-5bf474d74f-2l4xd" podUID="cf4e8b11-ca69-4d30-97d8-935339316048" Feb 16 11:18:55 crc kubenswrapper[4949]: I0216 11:18:55.244411 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" event={"ID":"7f63377e-69a2-4dee-95d5-6e77a40c2c6e","Type":"ContainerStarted","Data":"559f003d18c28241ef944a469743d353c0c75e6dc5f185432f76861735c4877f"} Feb 16 11:18:57 crc kubenswrapper[4949]: I0216 11:18:57.257552 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" event={"ID":"7f63377e-69a2-4dee-95d5-6e77a40c2c6e","Type":"ContainerStarted","Data":"ce4a8deb3428441f834f08b56e13b4cd14cfd6aae39bf0a3adbfb79d24c8a215"} Feb 16 11:18:57 crc kubenswrapper[4949]: I0216 11:18:57.258094 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:57 crc kubenswrapper[4949]: I0216 11:18:57.291916 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" podStartSLOduration=8.291898589 podStartE2EDuration="8.291898589s" podCreationTimestamp="2026-02-16 11:18:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:18:57.286008215 +0000 UTC m=+726.915342380" watchObservedRunningTime="2026-02-16 11:18:57.291898589 +0000 UTC m=+726.921232754" Feb 16 11:18:57 crc kubenswrapper[4949]: I0216 11:18:57.301347 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:57 crc kubenswrapper[4949]: I0216 11:18:57.633906 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq"] Feb 16 11:18:57 crc kubenswrapper[4949]: I0216 11:18:57.634012 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq" Feb 16 11:18:57 crc kubenswrapper[4949]: I0216 11:18:57.634491 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq" Feb 16 11:18:57 crc kubenswrapper[4949]: I0216 11:18:57.640623 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-59bdc8b94-sr5rv"] Feb 16 11:18:57 crc kubenswrapper[4949]: I0216 11:18:57.640742 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-sr5rv" Feb 16 11:18:57 crc kubenswrapper[4949]: I0216 11:18:57.641179 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-sr5rv" Feb 16 11:18:57 crc kubenswrapper[4949]: I0216 11:18:57.645602 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-68bc856cb9-zgpcq"] Feb 16 11:18:57 crc kubenswrapper[4949]: I0216 11:18:57.645735 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-zgpcq" Feb 16 11:18:57 crc kubenswrapper[4949]: I0216 11:18:57.646162 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-zgpcq" Feb 16 11:18:57 crc kubenswrapper[4949]: I0216 11:18:57.652331 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5bf474d74f-2l4xd"] Feb 16 11:18:57 crc kubenswrapper[4949]: I0216 11:18:57.652702 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-2l4xd" Feb 16 11:18:57 crc kubenswrapper[4949]: I0216 11:18:57.653053 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-2l4xd" Feb 16 11:18:57 crc kubenswrapper[4949]: I0216 11:18:57.682368 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl"] Feb 16 11:18:57 crc kubenswrapper[4949]: I0216 11:18:57.682480 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl" Feb 16 11:18:57 crc kubenswrapper[4949]: I0216 11:18:57.682905 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl" Feb 16 11:18:57 crc kubenswrapper[4949]: E0216 11:18:57.717189 4949 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq_openshift-operators_e21a80a9-650f-42d0-9cd4-6aaa334423a3_0(dba67d8a4c74869966e46e601da806dea6d3ede868d74f385ba0233f0ed5544f): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Feb 16 11:18:57 crc kubenswrapper[4949]: E0216 11:18:57.717256 4949 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq_openshift-operators_e21a80a9-650f-42d0-9cd4-6aaa334423a3_0(dba67d8a4c74869966e46e601da806dea6d3ede868d74f385ba0233f0ed5544f): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq" Feb 16 11:18:57 crc kubenswrapper[4949]: E0216 11:18:57.717287 4949 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq_openshift-operators_e21a80a9-650f-42d0-9cd4-6aaa334423a3_0(dba67d8a4c74869966e46e601da806dea6d3ede868d74f385ba0233f0ed5544f): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq" Feb 16 11:18:57 crc kubenswrapper[4949]: E0216 11:18:57.717330 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq_openshift-operators(e21a80a9-650f-42d0-9cd4-6aaa334423a3)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq_openshift-operators(e21a80a9-650f-42d0-9cd4-6aaa334423a3)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq_openshift-operators_e21a80a9-650f-42d0-9cd4-6aaa334423a3_0(dba67d8a4c74869966e46e601da806dea6d3ede868d74f385ba0233f0ed5544f): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq" podUID="e21a80a9-650f-42d0-9cd4-6aaa334423a3" Feb 16 11:18:57 crc kubenswrapper[4949]: E0216 11:18:57.781143 4949 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-zgpcq_openshift-operators_f5bc5773-497e-40be-ba68-82d49b1fd949_0(49b3b62944ffa13913a74c3b81376af69ec7cfa3074b6bae194b0866ed0265f7): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Feb 16 11:18:57 crc kubenswrapper[4949]: E0216 11:18:57.781266 4949 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-zgpcq_openshift-operators_f5bc5773-497e-40be-ba68-82d49b1fd949_0(49b3b62944ffa13913a74c3b81376af69ec7cfa3074b6bae194b0866ed0265f7): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-zgpcq" Feb 16 11:18:57 crc kubenswrapper[4949]: E0216 11:18:57.781298 4949 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-zgpcq_openshift-operators_f5bc5773-497e-40be-ba68-82d49b1fd949_0(49b3b62944ffa13913a74c3b81376af69ec7cfa3074b6bae194b0866ed0265f7): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-zgpcq" Feb 16 11:18:57 crc kubenswrapper[4949]: E0216 11:18:57.781356 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-68bc856cb9-zgpcq_openshift-operators(f5bc5773-497e-40be-ba68-82d49b1fd949)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-68bc856cb9-zgpcq_openshift-operators(f5bc5773-497e-40be-ba68-82d49b1fd949)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-zgpcq_openshift-operators_f5bc5773-497e-40be-ba68-82d49b1fd949_0(49b3b62944ffa13913a74c3b81376af69ec7cfa3074b6bae194b0866ed0265f7): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-zgpcq" podUID="f5bc5773-497e-40be-ba68-82d49b1fd949" Feb 16 11:18:57 crc kubenswrapper[4949]: E0216 11:18:57.781622 4949 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-2l4xd_openshift-operators_cf4e8b11-ca69-4d30-97d8-935339316048_0(5643f995a51b157c6450999bd38389cd3f7f4a22c931ba0052ccac2db0733023): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Feb 16 11:18:57 crc kubenswrapper[4949]: E0216 11:18:57.781657 4949 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-2l4xd_openshift-operators_cf4e8b11-ca69-4d30-97d8-935339316048_0(5643f995a51b157c6450999bd38389cd3f7f4a22c931ba0052ccac2db0733023): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5bf474d74f-2l4xd" Feb 16 11:18:57 crc kubenswrapper[4949]: E0216 11:18:57.781678 4949 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-2l4xd_openshift-operators_cf4e8b11-ca69-4d30-97d8-935339316048_0(5643f995a51b157c6450999bd38389cd3f7f4a22c931ba0052ccac2db0733023): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5bf474d74f-2l4xd" Feb 16 11:18:57 crc kubenswrapper[4949]: E0216 11:18:57.781710 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"perses-operator-5bf474d74f-2l4xd_openshift-operators(cf4e8b11-ca69-4d30-97d8-935339316048)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"perses-operator-5bf474d74f-2l4xd_openshift-operators(cf4e8b11-ca69-4d30-97d8-935339316048)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-2l4xd_openshift-operators_cf4e8b11-ca69-4d30-97d8-935339316048_0(5643f995a51b157c6450999bd38389cd3f7f4a22c931ba0052ccac2db0733023): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/perses-operator-5bf474d74f-2l4xd" podUID="cf4e8b11-ca69-4d30-97d8-935339316048" Feb 16 11:18:57 crc kubenswrapper[4949]: E0216 11:18:57.781748 4949 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-sr5rv_openshift-operators_0857cd6c-04ee-449a-88ed-99093185d7f5_0(7533d53da7c569ac234dbd7c9ccd3f26e9635f64560154d5a89c152613d762bf): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Feb 16 11:18:57 crc kubenswrapper[4949]: E0216 11:18:57.781771 4949 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-sr5rv_openshift-operators_0857cd6c-04ee-449a-88ed-99093185d7f5_0(7533d53da7c569ac234dbd7c9ccd3f26e9635f64560154d5a89c152613d762bf): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/observability-operator-59bdc8b94-sr5rv" Feb 16 11:18:57 crc kubenswrapper[4949]: E0216 11:18:57.781791 4949 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-sr5rv_openshift-operators_0857cd6c-04ee-449a-88ed-99093185d7f5_0(7533d53da7c569ac234dbd7c9ccd3f26e9635f64560154d5a89c152613d762bf): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-59bdc8b94-sr5rv" Feb 16 11:18:57 crc kubenswrapper[4949]: E0216 11:18:57.781825 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"observability-operator-59bdc8b94-sr5rv_openshift-operators(0857cd6c-04ee-449a-88ed-99093185d7f5)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"observability-operator-59bdc8b94-sr5rv_openshift-operators(0857cd6c-04ee-449a-88ed-99093185d7f5)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-sr5rv_openshift-operators_0857cd6c-04ee-449a-88ed-99093185d7f5_0(7533d53da7c569ac234dbd7c9ccd3f26e9635f64560154d5a89c152613d762bf): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/observability-operator-59bdc8b94-sr5rv" podUID="0857cd6c-04ee-449a-88ed-99093185d7f5" Feb 16 11:18:57 crc kubenswrapper[4949]: E0216 11:18:57.793023 4949 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl_openshift-operators_3e0ff572-a323-409b-be25-ad0bceff59a5_0(6d091cc4027a11fcbca1bcc147cdf5c8b3ceae53d694b43280b2dbb739093aeb): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Feb 16 11:18:57 crc kubenswrapper[4949]: E0216 11:18:57.793102 4949 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl_openshift-operators_3e0ff572-a323-409b-be25-ad0bceff59a5_0(6d091cc4027a11fcbca1bcc147cdf5c8b3ceae53d694b43280b2dbb739093aeb): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl" Feb 16 11:18:57 crc kubenswrapper[4949]: E0216 11:18:57.793128 4949 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl_openshift-operators_3e0ff572-a323-409b-be25-ad0bceff59a5_0(6d091cc4027a11fcbca1bcc147cdf5c8b3ceae53d694b43280b2dbb739093aeb): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl" Feb 16 11:18:57 crc kubenswrapper[4949]: E0216 11:18:57.793211 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl_openshift-operators(3e0ff572-a323-409b-be25-ad0bceff59a5)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl_openshift-operators(3e0ff572-a323-409b-be25-ad0bceff59a5)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl_openshift-operators_3e0ff572-a323-409b-be25-ad0bceff59a5_0(6d091cc4027a11fcbca1bcc147cdf5c8b3ceae53d694b43280b2dbb739093aeb): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl" podUID="3e0ff572-a323-409b-be25-ad0bceff59a5" Feb 16 11:18:58 crc kubenswrapper[4949]: I0216 11:18:58.262907 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:58 crc kubenswrapper[4949]: I0216 11:18:58.262963 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:18:58 crc kubenswrapper[4949]: I0216 11:18:58.290354 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:19:02 crc kubenswrapper[4949]: I0216 11:19:02.235479 4949 scope.go:117] "RemoveContainer" containerID="9fc6653cc53f85a17b85cce7ecc6bfbaf249773cba879f6752ec7d6e9b4cf323" Feb 16 11:19:02 crc kubenswrapper[4949]: E0216 11:19:02.236339 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-jsmls_openshift-multus(3e42a398-f83a-4463-9ab7-4e093e80d744)\"" pod="openshift-multus/multus-jsmls" podUID="3e42a398-f83a-4463-9ab7-4e093e80d744" Feb 16 11:19:08 crc kubenswrapper[4949]: I0216 11:19:08.234829 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-sr5rv" Feb 16 11:19:08 crc kubenswrapper[4949]: I0216 11:19:08.236043 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-sr5rv" Feb 16 11:19:08 crc kubenswrapper[4949]: E0216 11:19:08.278258 4949 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-sr5rv_openshift-operators_0857cd6c-04ee-449a-88ed-99093185d7f5_0(033257ce7efe409948b0b7e7e7d94d55587c04fb8029218aa3e81acc4693c43a): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Feb 16 11:19:08 crc kubenswrapper[4949]: E0216 11:19:08.278386 4949 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-sr5rv_openshift-operators_0857cd6c-04ee-449a-88ed-99093185d7f5_0(033257ce7efe409948b0b7e7e7d94d55587c04fb8029218aa3e81acc4693c43a): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/observability-operator-59bdc8b94-sr5rv" Feb 16 11:19:08 crc kubenswrapper[4949]: E0216 11:19:08.278442 4949 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-sr5rv_openshift-operators_0857cd6c-04ee-449a-88ed-99093185d7f5_0(033257ce7efe409948b0b7e7e7d94d55587c04fb8029218aa3e81acc4693c43a): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-59bdc8b94-sr5rv" Feb 16 11:19:08 crc kubenswrapper[4949]: E0216 11:19:08.278525 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"observability-operator-59bdc8b94-sr5rv_openshift-operators(0857cd6c-04ee-449a-88ed-99093185d7f5)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"observability-operator-59bdc8b94-sr5rv_openshift-operators(0857cd6c-04ee-449a-88ed-99093185d7f5)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-sr5rv_openshift-operators_0857cd6c-04ee-449a-88ed-99093185d7f5_0(033257ce7efe409948b0b7e7e7d94d55587c04fb8029218aa3e81acc4693c43a): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/observability-operator-59bdc8b94-sr5rv" podUID="0857cd6c-04ee-449a-88ed-99093185d7f5" Feb 16 11:19:09 crc kubenswrapper[4949]: I0216 11:19:09.235104 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl" Feb 16 11:19:09 crc kubenswrapper[4949]: I0216 11:19:09.235932 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl" Feb 16 11:19:09 crc kubenswrapper[4949]: I0216 11:19:09.236928 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-zgpcq" Feb 16 11:19:09 crc kubenswrapper[4949]: I0216 11:19:09.237486 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-zgpcq" Feb 16 11:19:09 crc kubenswrapper[4949]: E0216 11:19:09.273157 4949 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl_openshift-operators_3e0ff572-a323-409b-be25-ad0bceff59a5_0(7689da32c53cb139285429b3a7730502eaa6f04d2a1172db5caf54cc5c28dc1b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Feb 16 11:19:09 crc kubenswrapper[4949]: E0216 11:19:09.273293 4949 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl_openshift-operators_3e0ff572-a323-409b-be25-ad0bceff59a5_0(7689da32c53cb139285429b3a7730502eaa6f04d2a1172db5caf54cc5c28dc1b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl" Feb 16 11:19:09 crc kubenswrapper[4949]: E0216 11:19:09.273341 4949 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl_openshift-operators_3e0ff572-a323-409b-be25-ad0bceff59a5_0(7689da32c53cb139285429b3a7730502eaa6f04d2a1172db5caf54cc5c28dc1b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl" Feb 16 11:19:09 crc kubenswrapper[4949]: E0216 11:19:09.273448 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl_openshift-operators(3e0ff572-a323-409b-be25-ad0bceff59a5)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl_openshift-operators(3e0ff572-a323-409b-be25-ad0bceff59a5)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl_openshift-operators_3e0ff572-a323-409b-be25-ad0bceff59a5_0(7689da32c53cb139285429b3a7730502eaa6f04d2a1172db5caf54cc5c28dc1b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl" podUID="3e0ff572-a323-409b-be25-ad0bceff59a5" Feb 16 11:19:09 crc kubenswrapper[4949]: E0216 11:19:09.282829 4949 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-zgpcq_openshift-operators_f5bc5773-497e-40be-ba68-82d49b1fd949_0(b6d7cee250ba755ec1d75742b8e54768320d5eb889698d11cf9474777451075f): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Feb 16 11:19:09 crc kubenswrapper[4949]: E0216 11:19:09.282900 4949 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-zgpcq_openshift-operators_f5bc5773-497e-40be-ba68-82d49b1fd949_0(b6d7cee250ba755ec1d75742b8e54768320d5eb889698d11cf9474777451075f): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-zgpcq" Feb 16 11:19:09 crc kubenswrapper[4949]: E0216 11:19:09.282935 4949 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-zgpcq_openshift-operators_f5bc5773-497e-40be-ba68-82d49b1fd949_0(b6d7cee250ba755ec1d75742b8e54768320d5eb889698d11cf9474777451075f): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-68bc856cb9-zgpcq" Feb 16 11:19:09 crc kubenswrapper[4949]: E0216 11:19:09.282989 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-68bc856cb9-zgpcq_openshift-operators(f5bc5773-497e-40be-ba68-82d49b1fd949)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-68bc856cb9-zgpcq_openshift-operators(f5bc5773-497e-40be-ba68-82d49b1fd949)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-zgpcq_openshift-operators_f5bc5773-497e-40be-ba68-82d49b1fd949_0(b6d7cee250ba755ec1d75742b8e54768320d5eb889698d11cf9474777451075f): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-zgpcq" podUID="f5bc5773-497e-40be-ba68-82d49b1fd949" Feb 16 11:19:13 crc kubenswrapper[4949]: I0216 11:19:13.234835 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-2l4xd" Feb 16 11:19:13 crc kubenswrapper[4949]: I0216 11:19:13.234981 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq" Feb 16 11:19:13 crc kubenswrapper[4949]: I0216 11:19:13.236474 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-2l4xd" Feb 16 11:19:13 crc kubenswrapper[4949]: I0216 11:19:13.236476 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq" Feb 16 11:19:13 crc kubenswrapper[4949]: E0216 11:19:13.293470 4949 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-2l4xd_openshift-operators_cf4e8b11-ca69-4d30-97d8-935339316048_0(1817a2bbbdbbc8ad9121d661c93042cfcc9964a1f714b82c55fb851a2aae7d3c): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Feb 16 11:19:13 crc kubenswrapper[4949]: E0216 11:19:13.293574 4949 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-2l4xd_openshift-operators_cf4e8b11-ca69-4d30-97d8-935339316048_0(1817a2bbbdbbc8ad9121d661c93042cfcc9964a1f714b82c55fb851a2aae7d3c): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5bf474d74f-2l4xd" Feb 16 11:19:13 crc kubenswrapper[4949]: E0216 11:19:13.293613 4949 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-2l4xd_openshift-operators_cf4e8b11-ca69-4d30-97d8-935339316048_0(1817a2bbbdbbc8ad9121d661c93042cfcc9964a1f714b82c55fb851a2aae7d3c): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/perses-operator-5bf474d74f-2l4xd" Feb 16 11:19:13 crc kubenswrapper[4949]: E0216 11:19:13.293690 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"perses-operator-5bf474d74f-2l4xd_openshift-operators(cf4e8b11-ca69-4d30-97d8-935339316048)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"perses-operator-5bf474d74f-2l4xd_openshift-operators(cf4e8b11-ca69-4d30-97d8-935339316048)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-2l4xd_openshift-operators_cf4e8b11-ca69-4d30-97d8-935339316048_0(1817a2bbbdbbc8ad9121d661c93042cfcc9964a1f714b82c55fb851a2aae7d3c): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/perses-operator-5bf474d74f-2l4xd" podUID="cf4e8b11-ca69-4d30-97d8-935339316048" Feb 16 11:19:13 crc kubenswrapper[4949]: E0216 11:19:13.304221 4949 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq_openshift-operators_e21a80a9-650f-42d0-9cd4-6aaa334423a3_0(cbff23fbd6dec66bdd8cb6bfcdfd3822c8bb1a40d8c52fec02e7d84b65d7ea1a): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Feb 16 11:19:13 crc kubenswrapper[4949]: E0216 11:19:13.304301 4949 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq_openshift-operators_e21a80a9-650f-42d0-9cd4-6aaa334423a3_0(cbff23fbd6dec66bdd8cb6bfcdfd3822c8bb1a40d8c52fec02e7d84b65d7ea1a): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq" Feb 16 11:19:13 crc kubenswrapper[4949]: E0216 11:19:13.304330 4949 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq_openshift-operators_e21a80a9-650f-42d0-9cd4-6aaa334423a3_0(cbff23fbd6dec66bdd8cb6bfcdfd3822c8bb1a40d8c52fec02e7d84b65d7ea1a): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq" Feb 16 11:19:13 crc kubenswrapper[4949]: E0216 11:19:13.304398 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq_openshift-operators(e21a80a9-650f-42d0-9cd4-6aaa334423a3)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq_openshift-operators(e21a80a9-650f-42d0-9cd4-6aaa334423a3)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq_openshift-operators_e21a80a9-650f-42d0-9cd4-6aaa334423a3_0(cbff23fbd6dec66bdd8cb6bfcdfd3822c8bb1a40d8c52fec02e7d84b65d7ea1a): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq" podUID="e21a80a9-650f-42d0-9cd4-6aaa334423a3" Feb 16 11:19:16 crc kubenswrapper[4949]: I0216 11:19:16.235158 4949 scope.go:117] "RemoveContainer" containerID="9fc6653cc53f85a17b85cce7ecc6bfbaf249773cba879f6752ec7d6e9b4cf323" Feb 16 11:19:17 crc kubenswrapper[4949]: I0216 11:19:17.381039 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-jsmls_3e42a398-f83a-4463-9ab7-4e093e80d744/kube-multus/2.log" Feb 16 11:19:17 crc kubenswrapper[4949]: I0216 11:19:17.381442 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-jsmls" event={"ID":"3e42a398-f83a-4463-9ab7-4e093e80d744","Type":"ContainerStarted","Data":"bc16855f87e24ac997882ee0031bd3e0bb3438bbc4e9921e22213ad383c8abd3"} Feb 16 11:19:20 crc kubenswrapper[4949]: I0216 11:19:20.234967 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-sr5rv" Feb 16 11:19:20 crc kubenswrapper[4949]: I0216 11:19:20.235919 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-sr5rv" Feb 16 11:19:20 crc kubenswrapper[4949]: I0216 11:19:20.234968 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl" Feb 16 11:19:20 crc kubenswrapper[4949]: I0216 11:19:20.236577 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl" Feb 16 11:19:20 crc kubenswrapper[4949]: I0216 11:19:20.369314 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-dbvtp" Feb 16 11:19:20 crc kubenswrapper[4949]: I0216 11:19:20.610413 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-59bdc8b94-sr5rv"] Feb 16 11:19:20 crc kubenswrapper[4949]: I0216 11:19:20.830645 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl"] Feb 16 11:19:20 crc kubenswrapper[4949]: W0216 11:19:20.851498 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3e0ff572_a323_409b_be25_ad0bceff59a5.slice/crio-adf67dc64dde2c878e5462ebc47d6d1ec5255c55692336b5bf1c5e3885a6822a WatchSource:0}: Error finding container adf67dc64dde2c878e5462ebc47d6d1ec5255c55692336b5bf1c5e3885a6822a: Status 404 returned error can't find the container with id adf67dc64dde2c878e5462ebc47d6d1ec5255c55692336b5bf1c5e3885a6822a Feb 16 11:19:21 crc kubenswrapper[4949]: I0216 11:19:21.234879 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-zgpcq" Feb 16 11:19:21 crc kubenswrapper[4949]: I0216 11:19:21.243410 4949 util.go:30] "No sandbox for pod can be found. 
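The "back-off 20s restarting failed container" record above is the kubelet's crash-loop back-off in action: a repeatedly failing container is restarted with an exponentially growing delay (10s, 20s, 40s, ... capped at 5m in the upstream kubelet). A sketch of that doubling-with-cap policy follows; the constants and reset behavior are illustrative, not a copy of the kubelet implementation:

```go
// backoff.go - exponential restart back-off of the kind behind
// "back-off 20s restarting failed container" in the records above.
package main

import (
	"fmt"
	"time"
)

type crashBackoff struct {
	initial time.Duration
	max     time.Duration
	current time.Duration
}

// Next returns the delay before the next restart attempt,
// doubling each time up to the cap.
func (b *crashBackoff) Next() time.Duration {
	if b.current == 0 {
		b.current = b.initial
	} else if b.current < b.max {
		b.current *= 2
		if b.current > b.max {
			b.current = b.max
		}
	}
	return b.current
}

// Reset would be called once the container has run stably for a while.
func (b *crashBackoff) Reset() { b.current = 0 }

func main() {
	b := &crashBackoff{initial: 10 * time.Second, max: 5 * time.Minute}
	for i := 0; i < 7; i++ {
		fmt.Printf("restart %d after %v\n", i+1, b.Next())
	}
}
```

The 20s delay in the log is consistent with the second restart attempt under such a policy; the RemoveContainer/ContainerStarted records that follow show the retry eventually succeeding.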
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-zgpcq" Feb 16 11:19:21 crc kubenswrapper[4949]: I0216 11:19:21.427859 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl" event={"ID":"3e0ff572-a323-409b-be25-ad0bceff59a5","Type":"ContainerStarted","Data":"adf67dc64dde2c878e5462ebc47d6d1ec5255c55692336b5bf1c5e3885a6822a"} Feb 16 11:19:21 crc kubenswrapper[4949]: I0216 11:19:21.429625 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-59bdc8b94-sr5rv" event={"ID":"0857cd6c-04ee-449a-88ed-99093185d7f5","Type":"ContainerStarted","Data":"52d9474c0ebff92ae9c3cc776a7dcb95571b416218d3251b45bd5e77ee8954cc"} Feb 16 11:19:21 crc kubenswrapper[4949]: I0216 11:19:21.706053 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-68bc856cb9-zgpcq"] Feb 16 11:19:22 crc kubenswrapper[4949]: I0216 11:19:22.439713 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-zgpcq" event={"ID":"f5bc5773-497e-40be-ba68-82d49b1fd949","Type":"ContainerStarted","Data":"c450c654ebac532b82cce41018107550cb66a8c59b69545bfea10af7afe7cf55"} Feb 16 11:19:24 crc kubenswrapper[4949]: I0216 11:19:24.234474 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-2l4xd" Feb 16 11:19:24 crc kubenswrapper[4949]: I0216 11:19:24.234927 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-2l4xd" Feb 16 11:19:28 crc kubenswrapper[4949]: I0216 11:19:28.235161 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq" Feb 16 11:19:28 crc kubenswrapper[4949]: I0216 11:19:28.236222 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq" Feb 16 11:19:28 crc kubenswrapper[4949]: I0216 11:19:28.407320 4949 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Feb 16 11:19:29 crc kubenswrapper[4949]: I0216 11:19:29.030269 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5bf474d74f-2l4xd"] Feb 16 11:19:29 crc kubenswrapper[4949]: I0216 11:19:29.037233 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq"] Feb 16 11:19:29 crc kubenswrapper[4949]: W0216 11:19:29.040824 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode21a80a9_650f_42d0_9cd4_6aaa334423a3.slice/crio-5822f05a6b38d9b33872c8eea5ac2a9fba66e8f84e41163ba783d18e32fae818 WatchSource:0}: Error finding container 5822f05a6b38d9b33872c8eea5ac2a9fba66e8f84e41163ba783d18e32fae818: Status 404 returned error can't find the container with id 5822f05a6b38d9b33872c8eea5ac2a9fba66e8f84e41163ba783d18e32fae818 Feb 16 11:19:29 crc kubenswrapper[4949]: W0216 11:19:29.041091 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcf4e8b11_ca69_4d30_97d8_935339316048.slice/crio-ad4efdde9a0bd3829ddb817c2c27cbe260bcec8ecceabca243ded29609e173cf WatchSource:0}: Error finding container ad4efdde9a0bd3829ddb817c2c27cbe260bcec8ecceabca243ded29609e173cf: Status 404 returned error can't find the container with id ad4efdde9a0bd3829ddb817c2c27cbe260bcec8ecceabca243ded29609e173cf Feb 16 11:19:29 crc kubenswrapper[4949]: I0216 11:19:29.506286 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-zgpcq" event={"ID":"f5bc5773-497e-40be-ba68-82d49b1fd949","Type":"ContainerStarted","Data":"c7372df9f9b095d95c735ad2d84868a623f6de313fe0d277ae88b925ee52e12a"} Feb 16 11:19:29 crc kubenswrapper[4949]: I0216 11:19:29.508307 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq" event={"ID":"e21a80a9-650f-42d0-9cd4-6aaa334423a3","Type":"ContainerStarted","Data":"2162bbb30d23be8244fc82cac28337922fd5fa8edbd2b90f1d574e43731f47fc"} Feb 16 11:19:29 crc kubenswrapper[4949]: I0216 11:19:29.508333 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq" event={"ID":"e21a80a9-650f-42d0-9cd4-6aaa334423a3","Type":"ContainerStarted","Data":"5822f05a6b38d9b33872c8eea5ac2a9fba66e8f84e41163ba783d18e32fae818"} Feb 16 11:19:29 crc kubenswrapper[4949]: I0216 11:19:29.510389 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5bf474d74f-2l4xd" event={"ID":"cf4e8b11-ca69-4d30-97d8-935339316048","Type":"ContainerStarted","Data":"ad4efdde9a0bd3829ddb817c2c27cbe260bcec8ecceabca243ded29609e173cf"} Feb 16 11:19:29 crc kubenswrapper[4949]: I0216 11:19:29.512053 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl" event={"ID":"3e0ff572-a323-409b-be25-ad0bceff59a5","Type":"ContainerStarted","Data":"92d3b537a7c41814d6fbdd2a3d0e99369104d010f0a67e2e658447a167a85b40"} Feb 16 11:19:29 crc kubenswrapper[4949]: I0216 
11:19:29.518423 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-59bdc8b94-sr5rv" event={"ID":"0857cd6c-04ee-449a-88ed-99093185d7f5","Type":"ContainerStarted","Data":"376e162888b3b5f56f11cc7dce6ae2ca7dc8685ae5a154af3ce63775d3801746"} Feb 16 11:19:29 crc kubenswrapper[4949]: I0216 11:19:29.518715 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/observability-operator-59bdc8b94-sr5rv" Feb 16 11:19:29 crc kubenswrapper[4949]: I0216 11:19:29.520891 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/observability-operator-59bdc8b94-sr5rv" Feb 16 11:19:29 crc kubenswrapper[4949]: I0216 11:19:29.532571 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-zgpcq" podStartSLOduration=30.69333154 podStartE2EDuration="37.532549872s" podCreationTimestamp="2026-02-16 11:18:52 +0000 UTC" firstStartedPulling="2026-02-16 11:19:21.721309741 +0000 UTC m=+751.350643906" lastFinishedPulling="2026-02-16 11:19:28.560528073 +0000 UTC m=+758.189862238" observedRunningTime="2026-02-16 11:19:29.526627807 +0000 UTC m=+759.155961982" watchObservedRunningTime="2026-02-16 11:19:29.532549872 +0000 UTC m=+759.161884037" Feb 16 11:19:29 crc kubenswrapper[4949]: I0216 11:19:29.560128 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl" podStartSLOduration=29.877715585 podStartE2EDuration="37.560105171s" podCreationTimestamp="2026-02-16 11:18:52 +0000 UTC" firstStartedPulling="2026-02-16 11:19:20.855270079 +0000 UTC m=+750.484604244" lastFinishedPulling="2026-02-16 11:19:28.537659665 +0000 UTC m=+758.166993830" observedRunningTime="2026-02-16 11:19:29.552365005 +0000 UTC m=+759.181699170" watchObservedRunningTime="2026-02-16 11:19:29.560105171 +0000 UTC m=+759.189439336" Feb 16 11:19:29 crc kubenswrapper[4949]: I0216 11:19:29.580360 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-operator-59bdc8b94-sr5rv" podStartSLOduration=29.648625013 podStartE2EDuration="37.580341645s" podCreationTimestamp="2026-02-16 11:18:52 +0000 UTC" firstStartedPulling="2026-02-16 11:19:20.626120496 +0000 UTC m=+750.255454661" lastFinishedPulling="2026-02-16 11:19:28.557837128 +0000 UTC m=+758.187171293" observedRunningTime="2026-02-16 11:19:29.577227758 +0000 UTC m=+759.206561923" watchObservedRunningTime="2026-02-16 11:19:29.580341645 +0000 UTC m=+759.209675810" Feb 16 11:19:29 crc kubenswrapper[4949]: I0216 11:19:29.615107 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq" podStartSLOduration=37.615086305 podStartE2EDuration="37.615086305s" podCreationTimestamp="2026-02-16 11:18:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:19:29.61241108 +0000 UTC m=+759.241745245" watchObservedRunningTime="2026-02-16 11:19:29.615086305 +0000 UTC m=+759.244420480" Feb 16 11:19:32 crc kubenswrapper[4949]: I0216 11:19:32.538440 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5bf474d74f-2l4xd" 
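The pod_startup_latency_tracker records above encode simple arithmetic: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration is that figure minus the image-pull window (lastFinishedPulling minus firstStartedPulling); for the zgpcq record, 37.532549872 - 6.839218332 = 30.69333154, matching what is printed (and where no pull happened, the zero-value "0001-01-01" timestamps leave SLO equal to E2E). A small sketch that re-derives those figures from the timestamps as printed; the values are copied from the zgpcq record, and the parsing layout is an assumption matching the printed "+0000 UTC" form:

```go
// sloduration.go - re-derive podStartSLOduration from the timestamps
// printed by pod_startup_latency_tracker above (zgpcq record).
package main

import (
	"fmt"
	"time"
)

func mustParse(s string) time.Time {
	// Layout matches Go's default Time.String() output used in the log.
	t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", s)
	if err != nil {
		panic(err)
	}
	return t
}

func main() {
	created := mustParse("2026-02-16 11:18:52 +0000 UTC")
	running := mustParse("2026-02-16 11:19:29.532549872 +0000 UTC")
	startedPull := mustParse("2026-02-16 11:19:21.721309741 +0000 UTC")
	finishedPull := mustParse("2026-02-16 11:19:28.560528073 +0000 UTC")

	e2e := running.Sub(created)                // podStartE2EDuration
	slo := e2e - finishedPull.Sub(startedPull) // minus image-pull window
	fmt.Printf("podStartE2EDuration=%v podStartSLOduration=%v\n", e2e, slo)
}
```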
event={"ID":"cf4e8b11-ca69-4d30-97d8-935339316048","Type":"ContainerStarted","Data":"33e735abc99e9047c8e76b280781992650d5694b8f56fa7881fd20343781199f"} Feb 16 11:19:32 crc kubenswrapper[4949]: I0216 11:19:32.540361 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/perses-operator-5bf474d74f-2l4xd" Feb 16 11:19:32 crc kubenswrapper[4949]: I0216 11:19:32.566545 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/perses-operator-5bf474d74f-2l4xd" podStartSLOduration=38.227065188 podStartE2EDuration="40.566511448s" podCreationTimestamp="2026-02-16 11:18:52 +0000 UTC" firstStartedPulling="2026-02-16 11:19:29.044987549 +0000 UTC m=+758.674321724" lastFinishedPulling="2026-02-16 11:19:31.384433819 +0000 UTC m=+761.013767984" observedRunningTime="2026-02-16 11:19:32.558146835 +0000 UTC m=+762.187481020" watchObservedRunningTime="2026-02-16 11:19:32.566511448 +0000 UTC m=+762.195845613" Feb 16 11:19:34 crc kubenswrapper[4949]: I0216 11:19:34.550036 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 16 11:19:34 crc kubenswrapper[4949]: I0216 11:19:34.550364 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 16 11:19:35 crc kubenswrapper[4949]: I0216 11:19:35.853888 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-cf98fcc89-skb68"] Feb 16 11:19:35 crc kubenswrapper[4949]: I0216 11:19:35.855195 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-cf98fcc89-skb68" Feb 16 11:19:35 crc kubenswrapper[4949]: I0216 11:19:35.858445 4949 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-5jtdl" Feb 16 11:19:35 crc kubenswrapper[4949]: I0216 11:19:35.858449 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Feb 16 11:19:35 crc kubenswrapper[4949]: I0216 11:19:35.858929 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Feb 16 11:19:35 crc kubenswrapper[4949]: I0216 11:19:35.866991 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-858654f9db-vkmw4"] Feb 16 11:19:35 crc kubenswrapper[4949]: I0216 11:19:35.868305 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-858654f9db-vkmw4" Feb 16 11:19:35 crc kubenswrapper[4949]: I0216 11:19:35.870597 4949 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-cpgqq" Feb 16 11:19:35 crc kubenswrapper[4949]: I0216 11:19:35.876237 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-687f57d79b-cdp5z"] Feb 16 11:19:35 crc kubenswrapper[4949]: I0216 11:19:35.877522 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-687f57d79b-cdp5z" Feb 16 11:19:35 crc kubenswrapper[4949]: I0216 11:19:35.881594 4949 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-7fp2z" Feb 16 11:19:35 crc kubenswrapper[4949]: I0216 11:19:35.899225 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-cf98fcc89-skb68"] Feb 16 11:19:35 crc kubenswrapper[4949]: I0216 11:19:35.930216 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-687f57d79b-cdp5z"] Feb 16 11:19:35 crc kubenswrapper[4949]: I0216 11:19:35.944431 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-858654f9db-vkmw4"] Feb 16 11:19:35 crc kubenswrapper[4949]: I0216 11:19:35.996743 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5wb2n\" (UniqueName: \"kubernetes.io/projected/cd46089f-617c-42c5-b7af-c083934b98f8-kube-api-access-5wb2n\") pod \"cert-manager-cainjector-cf98fcc89-skb68\" (UID: \"cd46089f-617c-42c5-b7af-c083934b98f8\") " pod="cert-manager/cert-manager-cainjector-cf98fcc89-skb68" Feb 16 11:19:35 crc kubenswrapper[4949]: I0216 11:19:35.997034 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pwg5m\" (UniqueName: \"kubernetes.io/projected/61c29aa4-a4a6-439c-9a73-fb9e237a09a2-kube-api-access-pwg5m\") pod \"cert-manager-webhook-687f57d79b-cdp5z\" (UID: \"61c29aa4-a4a6-439c-9a73-fb9e237a09a2\") " pod="cert-manager/cert-manager-webhook-687f57d79b-cdp5z" Feb 16 11:19:35 crc kubenswrapper[4949]: I0216 11:19:35.997101 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7rdgd\" (UniqueName: \"kubernetes.io/projected/2337a301-ac3a-4802-8b4a-0cbb713cb547-kube-api-access-7rdgd\") pod \"cert-manager-858654f9db-vkmw4\" (UID: \"2337a301-ac3a-4802-8b4a-0cbb713cb547\") " pod="cert-manager/cert-manager-858654f9db-vkmw4" Feb 16 11:19:36 crc kubenswrapper[4949]: I0216 11:19:36.098430 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7rdgd\" (UniqueName: \"kubernetes.io/projected/2337a301-ac3a-4802-8b4a-0cbb713cb547-kube-api-access-7rdgd\") pod \"cert-manager-858654f9db-vkmw4\" (UID: \"2337a301-ac3a-4802-8b4a-0cbb713cb547\") " pod="cert-manager/cert-manager-858654f9db-vkmw4" Feb 16 11:19:36 crc kubenswrapper[4949]: I0216 11:19:36.098504 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5wb2n\" (UniqueName: \"kubernetes.io/projected/cd46089f-617c-42c5-b7af-c083934b98f8-kube-api-access-5wb2n\") pod \"cert-manager-cainjector-cf98fcc89-skb68\" (UID: \"cd46089f-617c-42c5-b7af-c083934b98f8\") " pod="cert-manager/cert-manager-cainjector-cf98fcc89-skb68" Feb 16 11:19:36 crc kubenswrapper[4949]: I0216 11:19:36.098602 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pwg5m\" (UniqueName: \"kubernetes.io/projected/61c29aa4-a4a6-439c-9a73-fb9e237a09a2-kube-api-access-pwg5m\") pod \"cert-manager-webhook-687f57d79b-cdp5z\" (UID: \"61c29aa4-a4a6-439c-9a73-fb9e237a09a2\") " pod="cert-manager/cert-manager-webhook-687f57d79b-cdp5z" Feb 16 11:19:36 crc kubenswrapper[4949]: I0216 11:19:36.121999 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5wb2n\" (UniqueName: 
\"kubernetes.io/projected/cd46089f-617c-42c5-b7af-c083934b98f8-kube-api-access-5wb2n\") pod \"cert-manager-cainjector-cf98fcc89-skb68\" (UID: \"cd46089f-617c-42c5-b7af-c083934b98f8\") " pod="cert-manager/cert-manager-cainjector-cf98fcc89-skb68" Feb 16 11:19:36 crc kubenswrapper[4949]: I0216 11:19:36.126212 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pwg5m\" (UniqueName: \"kubernetes.io/projected/61c29aa4-a4a6-439c-9a73-fb9e237a09a2-kube-api-access-pwg5m\") pod \"cert-manager-webhook-687f57d79b-cdp5z\" (UID: \"61c29aa4-a4a6-439c-9a73-fb9e237a09a2\") " pod="cert-manager/cert-manager-webhook-687f57d79b-cdp5z" Feb 16 11:19:36 crc kubenswrapper[4949]: I0216 11:19:36.129069 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7rdgd\" (UniqueName: \"kubernetes.io/projected/2337a301-ac3a-4802-8b4a-0cbb713cb547-kube-api-access-7rdgd\") pod \"cert-manager-858654f9db-vkmw4\" (UID: \"2337a301-ac3a-4802-8b4a-0cbb713cb547\") " pod="cert-manager/cert-manager-858654f9db-vkmw4" Feb 16 11:19:36 crc kubenswrapper[4949]: I0216 11:19:36.194886 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-cf98fcc89-skb68" Feb 16 11:19:36 crc kubenswrapper[4949]: I0216 11:19:36.212015 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-858654f9db-vkmw4" Feb 16 11:19:36 crc kubenswrapper[4949]: I0216 11:19:36.225679 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-687f57d79b-cdp5z" Feb 16 11:19:36 crc kubenswrapper[4949]: I0216 11:19:36.690671 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-cf98fcc89-skb68"] Feb 16 11:19:36 crc kubenswrapper[4949]: I0216 11:19:36.694451 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-858654f9db-vkmw4"] Feb 16 11:19:36 crc kubenswrapper[4949]: W0216 11:19:36.700341 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcd46089f_617c_42c5_b7af_c083934b98f8.slice/crio-0cfb39b9ffea9bfaaf6d9ef52c4df24a0b67cb173eab5effd4664b053ff4eace WatchSource:0}: Error finding container 0cfb39b9ffea9bfaaf6d9ef52c4df24a0b67cb173eab5effd4664b053ff4eace: Status 404 returned error can't find the container with id 0cfb39b9ffea9bfaaf6d9ef52c4df24a0b67cb173eab5effd4664b053ff4eace Feb 16 11:19:36 crc kubenswrapper[4949]: W0216 11:19:36.701196 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2337a301_ac3a_4802_8b4a_0cbb713cb547.slice/crio-ec67883bef0450bb8c2e5ca76dada5c16746921e01f79f0e1cfb8ffd026e4ed5 WatchSource:0}: Error finding container ec67883bef0450bb8c2e5ca76dada5c16746921e01f79f0e1cfb8ffd026e4ed5: Status 404 returned error can't find the container with id ec67883bef0450bb8c2e5ca76dada5c16746921e01f79f0e1cfb8ffd026e4ed5 Feb 16 11:19:36 crc kubenswrapper[4949]: I0216 11:19:36.766119 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-687f57d79b-cdp5z"] Feb 16 11:19:36 crc kubenswrapper[4949]: W0216 11:19:36.770535 4949 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod61c29aa4_a4a6_439c_9a73_fb9e237a09a2.slice/crio-6a2d602967e7f229b14fbfbeed66a59be19c7f9ee984a52e26bf47e9a4b1dbcb WatchSource:0}: Error finding container 6a2d602967e7f229b14fbfbeed66a59be19c7f9ee984a52e26bf47e9a4b1dbcb: Status 404 returned error can't find the container with id 6a2d602967e7f229b14fbfbeed66a59be19c7f9ee984a52e26bf47e9a4b1dbcb Feb 16 11:19:37 crc kubenswrapper[4949]: I0216 11:19:37.571189 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-687f57d79b-cdp5z" event={"ID":"61c29aa4-a4a6-439c-9a73-fb9e237a09a2","Type":"ContainerStarted","Data":"6a2d602967e7f229b14fbfbeed66a59be19c7f9ee984a52e26bf47e9a4b1dbcb"} Feb 16 11:19:37 crc kubenswrapper[4949]: I0216 11:19:37.572360 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-cf98fcc89-skb68" event={"ID":"cd46089f-617c-42c5-b7af-c083934b98f8","Type":"ContainerStarted","Data":"0cfb39b9ffea9bfaaf6d9ef52c4df24a0b67cb173eab5effd4664b053ff4eace"} Feb 16 11:19:37 crc kubenswrapper[4949]: I0216 11:19:37.573520 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-858654f9db-vkmw4" event={"ID":"2337a301-ac3a-4802-8b4a-0cbb713cb547","Type":"ContainerStarted","Data":"ec67883bef0450bb8c2e5ca76dada5c16746921e01f79f0e1cfb8ffd026e4ed5"} Feb 16 11:19:40 crc kubenswrapper[4949]: I0216 11:19:40.597498 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-cf98fcc89-skb68" event={"ID":"cd46089f-617c-42c5-b7af-c083934b98f8","Type":"ContainerStarted","Data":"30c9125d2a26091d3550d2cb77d983ddc6f37fd3e1d63072ee32e4e818451387"} Feb 16 11:19:40 crc kubenswrapper[4949]: I0216 11:19:40.600672 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-858654f9db-vkmw4" event={"ID":"2337a301-ac3a-4802-8b4a-0cbb713cb547","Type":"ContainerStarted","Data":"36738eafe3cef2cb16d83192af0a35aaafb77000ab345f0f1cca8137efb4b30e"} Feb 16 11:19:40 crc kubenswrapper[4949]: I0216 11:19:40.612957 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-cf98fcc89-skb68" podStartSLOduration=2.672127053 podStartE2EDuration="5.612929301s" podCreationTimestamp="2026-02-16 11:19:35 +0000 UTC" firstStartedPulling="2026-02-16 11:19:36.702232203 +0000 UTC m=+766.331566378" lastFinishedPulling="2026-02-16 11:19:39.643034461 +0000 UTC m=+769.272368626" observedRunningTime="2026-02-16 11:19:40.610344029 +0000 UTC m=+770.239678204" watchObservedRunningTime="2026-02-16 11:19:40.612929301 +0000 UTC m=+770.242263466" Feb 16 11:19:40 crc kubenswrapper[4949]: I0216 11:19:40.629939 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-858654f9db-vkmw4" podStartSLOduration=2.7004687450000002 podStartE2EDuration="5.629917235s" podCreationTimestamp="2026-02-16 11:19:35 +0000 UTC" firstStartedPulling="2026-02-16 11:19:36.705084253 +0000 UTC m=+766.334418418" lastFinishedPulling="2026-02-16 11:19:39.634532753 +0000 UTC m=+769.263866908" observedRunningTime="2026-02-16 11:19:40.626408967 +0000 UTC m=+770.255743152" watchObservedRunningTime="2026-02-16 11:19:40.629917235 +0000 UTC m=+770.259251400" Feb 16 11:19:41 crc kubenswrapper[4949]: I0216 11:19:41.612237 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-687f57d79b-cdp5z" 
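Records like the ones above all share the klog header format "Lmmdd hh:mm:ss.uuuuuu PID file:line] message", which makes them easy to post-process. A sketch of pulling the header fields out with a regexp; this is an ad-hoc pattern for the lines in this log, not a complete klog parser:

```go
// klogparse.go - extract severity, timestamp, PID and source location
// from a kubenswrapper/klog header like the records above.
package main

import (
	"fmt"
	"regexp"
)

var header = regexp.MustCompile(
	`^([IWEF])(\d{4}) (\d{2}:\d{2}:\d{2}\.\d{6})\s+(\d+) ([\w.]+:\d+)\] (.*)$`)

func main() {
	line := `I0216 11:19:40.612957 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-cf98fcc89-skb68"`
	m := header.FindStringSubmatch(line)
	if m == nil {
		fmt.Println("no match")
		return
	}
	fmt.Printf("severity=%s date=%s time=%s pid=%s source=%s\nmessage=%s\n",
		m[1], m[2], m[3], m[4], m[5], m[6])
}
```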
event={"ID":"61c29aa4-a4a6-439c-9a73-fb9e237a09a2","Type":"ContainerStarted","Data":"74438b1cfa68fe5de86542328ba5279c3afbbbf6d9b4464dfd589298885c87e7"} Feb 16 11:19:41 crc kubenswrapper[4949]: I0216 11:19:41.613200 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-687f57d79b-cdp5z" Feb 16 11:19:41 crc kubenswrapper[4949]: I0216 11:19:41.640976 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-687f57d79b-cdp5z" podStartSLOduration=2.502169391 podStartE2EDuration="6.640945162s" podCreationTimestamp="2026-02-16 11:19:35 +0000 UTC" firstStartedPulling="2026-02-16 11:19:36.772609877 +0000 UTC m=+766.401944062" lastFinishedPulling="2026-02-16 11:19:40.911385668 +0000 UTC m=+770.540719833" observedRunningTime="2026-02-16 11:19:41.637825465 +0000 UTC m=+771.267159660" watchObservedRunningTime="2026-02-16 11:19:41.640945162 +0000 UTC m=+771.270279357" Feb 16 11:19:43 crc kubenswrapper[4949]: I0216 11:19:43.296790 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/perses-operator-5bf474d74f-2l4xd" Feb 16 11:19:46 crc kubenswrapper[4949]: I0216 11:19:46.234021 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-687f57d79b-cdp5z" Feb 16 11:20:04 crc kubenswrapper[4949]: I0216 11:20:04.550989 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 16 11:20:04 crc kubenswrapper[4949]: I0216 11:20:04.553390 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 16 11:20:07 crc kubenswrapper[4949]: I0216 11:20:07.632539 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e08989d4dpd"] Feb 16 11:20:07 crc kubenswrapper[4949]: I0216 11:20:07.634209 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e08989d4dpd" Feb 16 11:20:07 crc kubenswrapper[4949]: I0216 11:20:07.636938 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Feb 16 11:20:07 crc kubenswrapper[4949]: I0216 11:20:07.645474 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e08989d4dpd"] Feb 16 11:20:07 crc kubenswrapper[4949]: I0216 11:20:07.712152 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/70c09ca8-bdb5-446a-8b4f-f57accb479a5-util\") pod \"e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e08989d4dpd\" (UID: \"70c09ca8-bdb5-446a-8b4f-f57accb479a5\") " pod="openshift-marketplace/e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e08989d4dpd" Feb 16 11:20:07 crc kubenswrapper[4949]: I0216 11:20:07.712298 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9vkzs\" (UniqueName: \"kubernetes.io/projected/70c09ca8-bdb5-446a-8b4f-f57accb479a5-kube-api-access-9vkzs\") pod \"e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e08989d4dpd\" (UID: \"70c09ca8-bdb5-446a-8b4f-f57accb479a5\") " pod="openshift-marketplace/e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e08989d4dpd" Feb 16 11:20:07 crc kubenswrapper[4949]: I0216 11:20:07.712775 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/70c09ca8-bdb5-446a-8b4f-f57accb479a5-bundle\") pod \"e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e08989d4dpd\" (UID: \"70c09ca8-bdb5-446a-8b4f-f57accb479a5\") " pod="openshift-marketplace/e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e08989d4dpd" Feb 16 11:20:07 crc kubenswrapper[4949]: I0216 11:20:07.814593 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/70c09ca8-bdb5-446a-8b4f-f57accb479a5-bundle\") pod \"e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e08989d4dpd\" (UID: \"70c09ca8-bdb5-446a-8b4f-f57accb479a5\") " pod="openshift-marketplace/e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e08989d4dpd" Feb 16 11:20:07 crc kubenswrapper[4949]: I0216 11:20:07.814697 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/70c09ca8-bdb5-446a-8b4f-f57accb479a5-util\") pod \"e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e08989d4dpd\" (UID: \"70c09ca8-bdb5-446a-8b4f-f57accb479a5\") " pod="openshift-marketplace/e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e08989d4dpd" Feb 16 11:20:07 crc kubenswrapper[4949]: I0216 11:20:07.814729 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9vkzs\" (UniqueName: \"kubernetes.io/projected/70c09ca8-bdb5-446a-8b4f-f57accb479a5-kube-api-access-9vkzs\") pod \"e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e08989d4dpd\" (UID: \"70c09ca8-bdb5-446a-8b4f-f57accb479a5\") " pod="openshift-marketplace/e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e08989d4dpd" Feb 16 11:20:07 crc kubenswrapper[4949]: I0216 11:20:07.815498 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/70c09ca8-bdb5-446a-8b4f-f57accb479a5-util\") pod \"e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e08989d4dpd\" (UID: \"70c09ca8-bdb5-446a-8b4f-f57accb479a5\") " pod="openshift-marketplace/e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e08989d4dpd" Feb 16 11:20:07 crc kubenswrapper[4949]: I0216 11:20:07.815507 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/70c09ca8-bdb5-446a-8b4f-f57accb479a5-bundle\") pod \"e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e08989d4dpd\" (UID: \"70c09ca8-bdb5-446a-8b4f-f57accb479a5\") " pod="openshift-marketplace/e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e08989d4dpd" Feb 16 11:20:07 crc kubenswrapper[4949]: I0216 11:20:07.843103 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9vkzs\" (UniqueName: \"kubernetes.io/projected/70c09ca8-bdb5-446a-8b4f-f57accb479a5-kube-api-access-9vkzs\") pod \"e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e08989d4dpd\" (UID: \"70c09ca8-bdb5-446a-8b4f-f57accb479a5\") " pod="openshift-marketplace/e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e08989d4dpd" Feb 16 11:20:07 crc kubenswrapper[4949]: I0216 11:20:07.949913 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e08989d4dpd" Feb 16 11:20:08 crc kubenswrapper[4949]: I0216 11:20:08.016755 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19mjhj8"] Feb 16 11:20:08 crc kubenswrapper[4949]: I0216 11:20:08.017999 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19mjhj8" Feb 16 11:20:08 crc kubenswrapper[4949]: I0216 11:20:08.030053 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19mjhj8"] Feb 16 11:20:08 crc kubenswrapper[4949]: I0216 11:20:08.118380 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nk4fs\" (UniqueName: \"kubernetes.io/projected/97f849a0-661e-46b3-981e-3933736a1cce-kube-api-access-nk4fs\") pod \"371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19mjhj8\" (UID: \"97f849a0-661e-46b3-981e-3933736a1cce\") " pod="openshift-marketplace/371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19mjhj8" Feb 16 11:20:08 crc kubenswrapper[4949]: I0216 11:20:08.118954 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/97f849a0-661e-46b3-981e-3933736a1cce-util\") pod \"371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19mjhj8\" (UID: \"97f849a0-661e-46b3-981e-3933736a1cce\") " pod="openshift-marketplace/371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19mjhj8" Feb 16 11:20:08 crc kubenswrapper[4949]: I0216 11:20:08.119289 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/97f849a0-661e-46b3-981e-3933736a1cce-bundle\") pod \"371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19mjhj8\" (UID: \"97f849a0-661e-46b3-981e-3933736a1cce\") " pod="openshift-marketplace/371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19mjhj8" Feb 16 11:20:08 
crc kubenswrapper[4949]: I0216 11:20:08.220828 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/97f849a0-661e-46b3-981e-3933736a1cce-bundle\") pod \"371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19mjhj8\" (UID: \"97f849a0-661e-46b3-981e-3933736a1cce\") " pod="openshift-marketplace/371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19mjhj8" Feb 16 11:20:08 crc kubenswrapper[4949]: I0216 11:20:08.220917 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nk4fs\" (UniqueName: \"kubernetes.io/projected/97f849a0-661e-46b3-981e-3933736a1cce-kube-api-access-nk4fs\") pod \"371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19mjhj8\" (UID: \"97f849a0-661e-46b3-981e-3933736a1cce\") " pod="openshift-marketplace/371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19mjhj8" Feb 16 11:20:08 crc kubenswrapper[4949]: I0216 11:20:08.220948 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/97f849a0-661e-46b3-981e-3933736a1cce-util\") pod \"371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19mjhj8\" (UID: \"97f849a0-661e-46b3-981e-3933736a1cce\") " pod="openshift-marketplace/371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19mjhj8" Feb 16 11:20:08 crc kubenswrapper[4949]: I0216 11:20:08.221444 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/97f849a0-661e-46b3-981e-3933736a1cce-bundle\") pod \"371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19mjhj8\" (UID: \"97f849a0-661e-46b3-981e-3933736a1cce\") " pod="openshift-marketplace/371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19mjhj8" Feb 16 11:20:08 crc kubenswrapper[4949]: I0216 11:20:08.221478 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/97f849a0-661e-46b3-981e-3933736a1cce-util\") pod \"371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19mjhj8\" (UID: \"97f849a0-661e-46b3-981e-3933736a1cce\") " pod="openshift-marketplace/371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19mjhj8" Feb 16 11:20:08 crc kubenswrapper[4949]: I0216 11:20:08.241581 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nk4fs\" (UniqueName: \"kubernetes.io/projected/97f849a0-661e-46b3-981e-3933736a1cce-kube-api-access-nk4fs\") pod \"371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19mjhj8\" (UID: \"97f849a0-661e-46b3-981e-3933736a1cce\") " pod="openshift-marketplace/371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19mjhj8" Feb 16 11:20:08 crc kubenswrapper[4949]: I0216 11:20:08.364464 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19mjhj8" Feb 16 11:20:08 crc kubenswrapper[4949]: I0216 11:20:08.484042 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e08989d4dpd"] Feb 16 11:20:08 crc kubenswrapper[4949]: I0216 11:20:08.675990 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19mjhj8"] Feb 16 11:20:08 crc kubenswrapper[4949]: W0216 11:20:08.683721 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod97f849a0_661e_46b3_981e_3933736a1cce.slice/crio-97ad33b9a6a1701663a27d30675ea10d89220dc239d0d59ae63f704f6184cf0b WatchSource:0}: Error finding container 97ad33b9a6a1701663a27d30675ea10d89220dc239d0d59ae63f704f6184cf0b: Status 404 returned error can't find the container with id 97ad33b9a6a1701663a27d30675ea10d89220dc239d0d59ae63f704f6184cf0b Feb 16 11:20:08 crc kubenswrapper[4949]: I0216 11:20:08.859468 4949 generic.go:334] "Generic (PLEG): container finished" podID="70c09ca8-bdb5-446a-8b4f-f57accb479a5" containerID="62a08dcc3fa02921373a0e96f66d160b5d2b097669a233f2cffb02660472b068" exitCode=0 Feb 16 11:20:08 crc kubenswrapper[4949]: I0216 11:20:08.859552 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e08989d4dpd" event={"ID":"70c09ca8-bdb5-446a-8b4f-f57accb479a5","Type":"ContainerDied","Data":"62a08dcc3fa02921373a0e96f66d160b5d2b097669a233f2cffb02660472b068"} Feb 16 11:20:08 crc kubenswrapper[4949]: I0216 11:20:08.859580 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e08989d4dpd" event={"ID":"70c09ca8-bdb5-446a-8b4f-f57accb479a5","Type":"ContainerStarted","Data":"5132e39b89aa18afa5cd36ddfe59933311d7b14107d56f74ee3a5841817a04d9"} Feb 16 11:20:08 crc kubenswrapper[4949]: I0216 11:20:08.862510 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19mjhj8" event={"ID":"97f849a0-661e-46b3-981e-3933736a1cce","Type":"ContainerStarted","Data":"69683db19774b74aec4fd9b5849793c75ca8f78f91f1a89770de06c373efa5cc"} Feb 16 11:20:08 crc kubenswrapper[4949]: I0216 11:20:08.862550 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19mjhj8" event={"ID":"97f849a0-661e-46b3-981e-3933736a1cce","Type":"ContainerStarted","Data":"97ad33b9a6a1701663a27d30675ea10d89220dc239d0d59ae63f704f6184cf0b"} Feb 16 11:20:09 crc kubenswrapper[4949]: I0216 11:20:09.871673 4949 generic.go:334] "Generic (PLEG): container finished" podID="97f849a0-661e-46b3-981e-3933736a1cce" containerID="69683db19774b74aec4fd9b5849793c75ca8f78f91f1a89770de06c373efa5cc" exitCode=0 Feb 16 11:20:09 crc kubenswrapper[4949]: I0216 11:20:09.871756 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19mjhj8" event={"ID":"97f849a0-661e-46b3-981e-3933736a1cce","Type":"ContainerDied","Data":"69683db19774b74aec4fd9b5849793c75ca8f78f91f1a89770de06c373efa5cc"} Feb 16 11:20:10 crc kubenswrapper[4949]: I0216 11:20:10.879063 4949 generic.go:334] "Generic (PLEG): container finished" 
podID="70c09ca8-bdb5-446a-8b4f-f57accb479a5" containerID="b52adb9535479f99d979463af744d02b4f401ad70188fadfbbb13d1cd9482dc4" exitCode=0 Feb 16 11:20:10 crc kubenswrapper[4949]: I0216 11:20:10.879404 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e08989d4dpd" event={"ID":"70c09ca8-bdb5-446a-8b4f-f57accb479a5","Type":"ContainerDied","Data":"b52adb9535479f99d979463af744d02b4f401ad70188fadfbbb13d1cd9482dc4"} Feb 16 11:20:11 crc kubenswrapper[4949]: I0216 11:20:11.371885 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-l5qft"] Feb 16 11:20:11 crc kubenswrapper[4949]: I0216 11:20:11.373877 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-l5qft" Feb 16 11:20:11 crc kubenswrapper[4949]: I0216 11:20:11.391418 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-l5qft"] Feb 16 11:20:11 crc kubenswrapper[4949]: I0216 11:20:11.471375 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad125dc9-72e2-4a6d-bc98-3189f7766fbd-catalog-content\") pod \"redhat-operators-l5qft\" (UID: \"ad125dc9-72e2-4a6d-bc98-3189f7766fbd\") " pod="openshift-marketplace/redhat-operators-l5qft" Feb 16 11:20:11 crc kubenswrapper[4949]: I0216 11:20:11.471471 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad125dc9-72e2-4a6d-bc98-3189f7766fbd-utilities\") pod \"redhat-operators-l5qft\" (UID: \"ad125dc9-72e2-4a6d-bc98-3189f7766fbd\") " pod="openshift-marketplace/redhat-operators-l5qft" Feb 16 11:20:11 crc kubenswrapper[4949]: I0216 11:20:11.471500 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fhftc\" (UniqueName: \"kubernetes.io/projected/ad125dc9-72e2-4a6d-bc98-3189f7766fbd-kube-api-access-fhftc\") pod \"redhat-operators-l5qft\" (UID: \"ad125dc9-72e2-4a6d-bc98-3189f7766fbd\") " pod="openshift-marketplace/redhat-operators-l5qft" Feb 16 11:20:11 crc kubenswrapper[4949]: I0216 11:20:11.573347 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad125dc9-72e2-4a6d-bc98-3189f7766fbd-catalog-content\") pod \"redhat-operators-l5qft\" (UID: \"ad125dc9-72e2-4a6d-bc98-3189f7766fbd\") " pod="openshift-marketplace/redhat-operators-l5qft" Feb 16 11:20:11 crc kubenswrapper[4949]: I0216 11:20:11.573463 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad125dc9-72e2-4a6d-bc98-3189f7766fbd-utilities\") pod \"redhat-operators-l5qft\" (UID: \"ad125dc9-72e2-4a6d-bc98-3189f7766fbd\") " pod="openshift-marketplace/redhat-operators-l5qft" Feb 16 11:20:11 crc kubenswrapper[4949]: I0216 11:20:11.573503 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fhftc\" (UniqueName: \"kubernetes.io/projected/ad125dc9-72e2-4a6d-bc98-3189f7766fbd-kube-api-access-fhftc\") pod \"redhat-operators-l5qft\" (UID: \"ad125dc9-72e2-4a6d-bc98-3189f7766fbd\") " pod="openshift-marketplace/redhat-operators-l5qft" Feb 16 11:20:11 crc kubenswrapper[4949]: I0216 11:20:11.573940 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad125dc9-72e2-4a6d-bc98-3189f7766fbd-catalog-content\") pod \"redhat-operators-l5qft\" (UID: \"ad125dc9-72e2-4a6d-bc98-3189f7766fbd\") " pod="openshift-marketplace/redhat-operators-l5qft" Feb 16 11:20:11 crc kubenswrapper[4949]: I0216 11:20:11.573972 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad125dc9-72e2-4a6d-bc98-3189f7766fbd-utilities\") pod \"redhat-operators-l5qft\" (UID: \"ad125dc9-72e2-4a6d-bc98-3189f7766fbd\") " pod="openshift-marketplace/redhat-operators-l5qft" Feb 16 11:20:11 crc kubenswrapper[4949]: I0216 11:20:11.596384 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fhftc\" (UniqueName: \"kubernetes.io/projected/ad125dc9-72e2-4a6d-bc98-3189f7766fbd-kube-api-access-fhftc\") pod \"redhat-operators-l5qft\" (UID: \"ad125dc9-72e2-4a6d-bc98-3189f7766fbd\") " pod="openshift-marketplace/redhat-operators-l5qft" Feb 16 11:20:11 crc kubenswrapper[4949]: I0216 11:20:11.687139 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-l5qft" Feb 16 11:20:11 crc kubenswrapper[4949]: I0216 11:20:11.894714 4949 generic.go:334] "Generic (PLEG): container finished" podID="97f849a0-661e-46b3-981e-3933736a1cce" containerID="8ce5a27cc2b612abaca62aae38ff036e3dcc077940010651224c8dd94bf5a942" exitCode=0 Feb 16 11:20:11 crc kubenswrapper[4949]: I0216 11:20:11.895381 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19mjhj8" event={"ID":"97f849a0-661e-46b3-981e-3933736a1cce","Type":"ContainerDied","Data":"8ce5a27cc2b612abaca62aae38ff036e3dcc077940010651224c8dd94bf5a942"} Feb 16 11:20:11 crc kubenswrapper[4949]: I0216 11:20:11.897886 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e08989d4dpd" event={"ID":"70c09ca8-bdb5-446a-8b4f-f57accb479a5","Type":"ContainerDied","Data":"dfc83cffd0ca433a0e3a48f182cd59776e40fe084c63797fb80dae4519750ecb"} Feb 16 11:20:11 crc kubenswrapper[4949]: I0216 11:20:11.897710 4949 generic.go:334] "Generic (PLEG): container finished" podID="70c09ca8-bdb5-446a-8b4f-f57accb479a5" containerID="dfc83cffd0ca433a0e3a48f182cd59776e40fe084c63797fb80dae4519750ecb" exitCode=0 Feb 16 11:20:12 crc kubenswrapper[4949]: I0216 11:20:12.153633 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-l5qft"] Feb 16 11:20:12 crc kubenswrapper[4949]: W0216 11:20:12.185537 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podad125dc9_72e2_4a6d_bc98_3189f7766fbd.slice/crio-177ec17d3e70e24f7ac6e7815bc1faf667f1182ba90747aae91a863eb1123608 WatchSource:0}: Error finding container 177ec17d3e70e24f7ac6e7815bc1faf667f1182ba90747aae91a863eb1123608: Status 404 returned error can't find the container with id 177ec17d3e70e24f7ac6e7815bc1faf667f1182ba90747aae91a863eb1123608 Feb 16 11:20:12 crc kubenswrapper[4949]: I0216 11:20:12.905129 4949 generic.go:334] "Generic (PLEG): container finished" podID="97f849a0-661e-46b3-981e-3933736a1cce" containerID="7741d65a55cfc2dbcb2ee8f28f7acf297da8dc77d4ae2e9ceed070fd06b63b6d" exitCode=0 Feb 16 11:20:12 crc kubenswrapper[4949]: I0216 11:20:12.905209 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19mjhj8" event={"ID":"97f849a0-661e-46b3-981e-3933736a1cce","Type":"ContainerDied","Data":"7741d65a55cfc2dbcb2ee8f28f7acf297da8dc77d4ae2e9ceed070fd06b63b6d"} Feb 16 11:20:12 crc kubenswrapper[4949]: I0216 11:20:12.907261 4949 generic.go:334] "Generic (PLEG): container finished" podID="ad125dc9-72e2-4a6d-bc98-3189f7766fbd" containerID="895587cbeb9fd41c440f5ae30ff2c3ed28f3d401d2fb76ac26c360b1990ea744" exitCode=0 Feb 16 11:20:12 crc kubenswrapper[4949]: I0216 11:20:12.907386 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l5qft" event={"ID":"ad125dc9-72e2-4a6d-bc98-3189f7766fbd","Type":"ContainerDied","Data":"895587cbeb9fd41c440f5ae30ff2c3ed28f3d401d2fb76ac26c360b1990ea744"} Feb 16 11:20:12 crc kubenswrapper[4949]: I0216 11:20:12.907450 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l5qft" event={"ID":"ad125dc9-72e2-4a6d-bc98-3189f7766fbd","Type":"ContainerStarted","Data":"177ec17d3e70e24f7ac6e7815bc1faf667f1182ba90747aae91a863eb1123608"} Feb 16 11:20:13 crc kubenswrapper[4949]: I0216 11:20:13.205514 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e08989d4dpd" Feb 16 11:20:13 crc kubenswrapper[4949]: I0216 11:20:13.304577 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/70c09ca8-bdb5-446a-8b4f-f57accb479a5-bundle\") pod \"70c09ca8-bdb5-446a-8b4f-f57accb479a5\" (UID: \"70c09ca8-bdb5-446a-8b4f-f57accb479a5\") " Feb 16 11:20:13 crc kubenswrapper[4949]: I0216 11:20:13.304618 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9vkzs\" (UniqueName: \"kubernetes.io/projected/70c09ca8-bdb5-446a-8b4f-f57accb479a5-kube-api-access-9vkzs\") pod \"70c09ca8-bdb5-446a-8b4f-f57accb479a5\" (UID: \"70c09ca8-bdb5-446a-8b4f-f57accb479a5\") " Feb 16 11:20:13 crc kubenswrapper[4949]: I0216 11:20:13.304709 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/70c09ca8-bdb5-446a-8b4f-f57accb479a5-util\") pod \"70c09ca8-bdb5-446a-8b4f-f57accb479a5\" (UID: \"70c09ca8-bdb5-446a-8b4f-f57accb479a5\") " Feb 16 11:20:13 crc kubenswrapper[4949]: I0216 11:20:13.305965 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/70c09ca8-bdb5-446a-8b4f-f57accb479a5-bundle" (OuterVolumeSpecName: "bundle") pod "70c09ca8-bdb5-446a-8b4f-f57accb479a5" (UID: "70c09ca8-bdb5-446a-8b4f-f57accb479a5"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:20:13 crc kubenswrapper[4949]: I0216 11:20:13.309832 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/70c09ca8-bdb5-446a-8b4f-f57accb479a5-kube-api-access-9vkzs" (OuterVolumeSpecName: "kube-api-access-9vkzs") pod "70c09ca8-bdb5-446a-8b4f-f57accb479a5" (UID: "70c09ca8-bdb5-446a-8b4f-f57accb479a5"). InnerVolumeSpecName "kube-api-access-9vkzs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:20:13 crc kubenswrapper[4949]: I0216 11:20:13.320384 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/70c09ca8-bdb5-446a-8b4f-f57accb479a5-util" (OuterVolumeSpecName: "util") pod "70c09ca8-bdb5-446a-8b4f-f57accb479a5" (UID: "70c09ca8-bdb5-446a-8b4f-f57accb479a5"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:20:13 crc kubenswrapper[4949]: I0216 11:20:13.407651 4949 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/70c09ca8-bdb5-446a-8b4f-f57accb479a5-util\") on node \"crc\" DevicePath \"\"" Feb 16 11:20:13 crc kubenswrapper[4949]: I0216 11:20:13.407691 4949 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/70c09ca8-bdb5-446a-8b4f-f57accb479a5-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:20:13 crc kubenswrapper[4949]: I0216 11:20:13.407702 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9vkzs\" (UniqueName: \"kubernetes.io/projected/70c09ca8-bdb5-446a-8b4f-f57accb479a5-kube-api-access-9vkzs\") on node \"crc\" DevicePath \"\"" Feb 16 11:20:13 crc kubenswrapper[4949]: I0216 11:20:13.917230 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e08989d4dpd" event={"ID":"70c09ca8-bdb5-446a-8b4f-f57accb479a5","Type":"ContainerDied","Data":"5132e39b89aa18afa5cd36ddfe59933311d7b14107d56f74ee3a5841817a04d9"} Feb 16 11:20:13 crc kubenswrapper[4949]: I0216 11:20:13.917294 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5132e39b89aa18afa5cd36ddfe59933311d7b14107d56f74ee3a5841817a04d9" Feb 16 11:20:13 crc kubenswrapper[4949]: I0216 11:20:13.917382 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e08989d4dpd" Feb 16 11:20:14 crc kubenswrapper[4949]: I0216 11:20:14.153804 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19mjhj8" Feb 16 11:20:14 crc kubenswrapper[4949]: I0216 11:20:14.218730 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/97f849a0-661e-46b3-981e-3933736a1cce-bundle\") pod \"97f849a0-661e-46b3-981e-3933736a1cce\" (UID: \"97f849a0-661e-46b3-981e-3933736a1cce\") " Feb 16 11:20:14 crc kubenswrapper[4949]: I0216 11:20:14.218830 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nk4fs\" (UniqueName: \"kubernetes.io/projected/97f849a0-661e-46b3-981e-3933736a1cce-kube-api-access-nk4fs\") pod \"97f849a0-661e-46b3-981e-3933736a1cce\" (UID: \"97f849a0-661e-46b3-981e-3933736a1cce\") " Feb 16 11:20:14 crc kubenswrapper[4949]: I0216 11:20:14.218878 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/97f849a0-661e-46b3-981e-3933736a1cce-util\") pod \"97f849a0-661e-46b3-981e-3933736a1cce\" (UID: \"97f849a0-661e-46b3-981e-3933736a1cce\") " Feb 16 11:20:14 crc kubenswrapper[4949]: I0216 11:20:14.220248 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/97f849a0-661e-46b3-981e-3933736a1cce-bundle" (OuterVolumeSpecName: "bundle") pod "97f849a0-661e-46b3-981e-3933736a1cce" (UID: "97f849a0-661e-46b3-981e-3933736a1cce"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:20:14 crc kubenswrapper[4949]: I0216 11:20:14.223713 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/97f849a0-661e-46b3-981e-3933736a1cce-kube-api-access-nk4fs" (OuterVolumeSpecName: "kube-api-access-nk4fs") pod "97f849a0-661e-46b3-981e-3933736a1cce" (UID: "97f849a0-661e-46b3-981e-3933736a1cce"). InnerVolumeSpecName "kube-api-access-nk4fs". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:20:14 crc kubenswrapper[4949]: I0216 11:20:14.231959 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/97f849a0-661e-46b3-981e-3933736a1cce-util" (OuterVolumeSpecName: "util") pod "97f849a0-661e-46b3-981e-3933736a1cce" (UID: "97f849a0-661e-46b3-981e-3933736a1cce"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:20:14 crc kubenswrapper[4949]: I0216 11:20:14.320332 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nk4fs\" (UniqueName: \"kubernetes.io/projected/97f849a0-661e-46b3-981e-3933736a1cce-kube-api-access-nk4fs\") on node \"crc\" DevicePath \"\"" Feb 16 11:20:14 crc kubenswrapper[4949]: I0216 11:20:14.320363 4949 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/97f849a0-661e-46b3-981e-3933736a1cce-util\") on node \"crc\" DevicePath \"\"" Feb 16 11:20:14 crc kubenswrapper[4949]: I0216 11:20:14.320373 4949 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/97f849a0-661e-46b3-981e-3933736a1cce-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:20:14 crc kubenswrapper[4949]: I0216 11:20:14.927216 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19mjhj8" event={"ID":"97f849a0-661e-46b3-981e-3933736a1cce","Type":"ContainerDied","Data":"97ad33b9a6a1701663a27d30675ea10d89220dc239d0d59ae63f704f6184cf0b"} Feb 16 11:20:14 crc kubenswrapper[4949]: I0216 11:20:14.927560 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="97ad33b9a6a1701663a27d30675ea10d89220dc239d0d59ae63f704f6184cf0b" Feb 16 11:20:14 crc kubenswrapper[4949]: I0216 11:20:14.927673 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19mjhj8" Feb 16 11:20:14 crc kubenswrapper[4949]: I0216 11:20:14.930148 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l5qft" event={"ID":"ad125dc9-72e2-4a6d-bc98-3189f7766fbd","Type":"ContainerStarted","Data":"c1191064aececc306f73a99811a829415567a303bcc9f58e95bd8ae3686b6eaf"} Feb 16 11:20:15 crc kubenswrapper[4949]: I0216 11:20:15.940122 4949 generic.go:334] "Generic (PLEG): container finished" podID="ad125dc9-72e2-4a6d-bc98-3189f7766fbd" containerID="c1191064aececc306f73a99811a829415567a303bcc9f58e95bd8ae3686b6eaf" exitCode=0 Feb 16 11:20:15 crc kubenswrapper[4949]: I0216 11:20:15.940202 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l5qft" event={"ID":"ad125dc9-72e2-4a6d-bc98-3189f7766fbd","Type":"ContainerDied","Data":"c1191064aececc306f73a99811a829415567a303bcc9f58e95bd8ae3686b6eaf"} Feb 16 11:20:16 crc kubenswrapper[4949]: I0216 11:20:16.950401 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l5qft" event={"ID":"ad125dc9-72e2-4a6d-bc98-3189f7766fbd","Type":"ContainerStarted","Data":"7f795cf29c1f11f5754de1ca4107fc99c374f7f18013965dff9deb6d9fdfd0a3"} Feb 16 11:20:16 crc kubenswrapper[4949]: I0216 11:20:16.977424 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-l5qft" podStartSLOduration=2.551908611 podStartE2EDuration="5.977384131s" podCreationTimestamp="2026-02-16 11:20:11 +0000 UTC" firstStartedPulling="2026-02-16 11:20:12.908804315 +0000 UTC m=+802.538138480" lastFinishedPulling="2026-02-16 11:20:16.334279835 +0000 UTC m=+805.963614000" observedRunningTime="2026-02-16 11:20:16.976416894 +0000 UTC m=+806.605751069" watchObservedRunningTime="2026-02-16 11:20:16.977384131 +0000 UTC m=+806.606718336" Feb 16 11:20:21 crc kubenswrapper[4949]: I0216 
11:20:21.687393 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-l5qft" Feb 16 11:20:21 crc kubenswrapper[4949]: I0216 11:20:21.688634 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-l5qft" Feb 16 11:20:22 crc kubenswrapper[4949]: I0216 11:20:22.727033 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-l5qft" podUID="ad125dc9-72e2-4a6d-bc98-3189f7766fbd" containerName="registry-server" probeResult="failure" output=< Feb 16 11:20:22 crc kubenswrapper[4949]: timeout: failed to connect service ":50051" within 1s Feb 16 11:20:22 crc kubenswrapper[4949]: > Feb 16 11:20:25 crc kubenswrapper[4949]: I0216 11:20:25.177403 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators-redhat/loki-operator-controller-manager-85b64669c-vc5kk"] Feb 16 11:20:25 crc kubenswrapper[4949]: E0216 11:20:25.177695 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="70c09ca8-bdb5-446a-8b4f-f57accb479a5" containerName="extract" Feb 16 11:20:25 crc kubenswrapper[4949]: I0216 11:20:25.177709 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="70c09ca8-bdb5-446a-8b4f-f57accb479a5" containerName="extract" Feb 16 11:20:25 crc kubenswrapper[4949]: E0216 11:20:25.177718 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97f849a0-661e-46b3-981e-3933736a1cce" containerName="util" Feb 16 11:20:25 crc kubenswrapper[4949]: I0216 11:20:25.177724 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="97f849a0-661e-46b3-981e-3933736a1cce" containerName="util" Feb 16 11:20:25 crc kubenswrapper[4949]: E0216 11:20:25.177734 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="70c09ca8-bdb5-446a-8b4f-f57accb479a5" containerName="pull" Feb 16 11:20:25 crc kubenswrapper[4949]: I0216 11:20:25.177740 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="70c09ca8-bdb5-446a-8b4f-f57accb479a5" containerName="pull" Feb 16 11:20:25 crc kubenswrapper[4949]: E0216 11:20:25.177760 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="70c09ca8-bdb5-446a-8b4f-f57accb479a5" containerName="util" Feb 16 11:20:25 crc kubenswrapper[4949]: I0216 11:20:25.177767 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="70c09ca8-bdb5-446a-8b4f-f57accb479a5" containerName="util" Feb 16 11:20:25 crc kubenswrapper[4949]: E0216 11:20:25.177779 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97f849a0-661e-46b3-981e-3933736a1cce" containerName="pull" Feb 16 11:20:25 crc kubenswrapper[4949]: I0216 11:20:25.177785 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="97f849a0-661e-46b3-981e-3933736a1cce" containerName="pull" Feb 16 11:20:25 crc kubenswrapper[4949]: E0216 11:20:25.177799 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97f849a0-661e-46b3-981e-3933736a1cce" containerName="extract" Feb 16 11:20:25 crc kubenswrapper[4949]: I0216 11:20:25.177806 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="97f849a0-661e-46b3-981e-3933736a1cce" containerName="extract" Feb 16 11:20:25 crc kubenswrapper[4949]: I0216 11:20:25.177944 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="70c09ca8-bdb5-446a-8b4f-f57accb479a5" containerName="extract" Feb 16 11:20:25 crc kubenswrapper[4949]: I0216 11:20:25.177966 4949 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="97f849a0-661e-46b3-981e-3933736a1cce" containerName="extract" Feb 16 11:20:25 crc kubenswrapper[4949]: I0216 11:20:25.178886 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators-redhat/loki-operator-controller-manager-85b64669c-vc5kk" Feb 16 11:20:25 crc kubenswrapper[4949]: I0216 11:20:25.182700 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"loki-operator-manager-config" Feb 16 11:20:25 crc kubenswrapper[4949]: I0216 11:20:25.182987 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-metrics" Feb 16 11:20:25 crc kubenswrapper[4949]: I0216 11:20:25.183128 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-controller-manager-dockercfg-w8tfp" Feb 16 11:20:25 crc kubenswrapper[4949]: I0216 11:20:25.189793 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"kube-root-ca.crt" Feb 16 11:20:25 crc kubenswrapper[4949]: I0216 11:20:25.190060 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"openshift-service-ca.crt" Feb 16 11:20:25 crc kubenswrapper[4949]: I0216 11:20:25.193665 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-controller-manager-service-cert" Feb 16 11:20:25 crc kubenswrapper[4949]: I0216 11:20:25.256730 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators-redhat/loki-operator-controller-manager-85b64669c-vc5kk"] Feb 16 11:20:25 crc kubenswrapper[4949]: I0216 11:20:25.277340 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sft7f\" (UniqueName: \"kubernetes.io/projected/b0fa6cb2-2288-43e5-bd0a-065e92f72ece-kube-api-access-sft7f\") pod \"loki-operator-controller-manager-85b64669c-vc5kk\" (UID: \"b0fa6cb2-2288-43e5-bd0a-065e92f72ece\") " pod="openshift-operators-redhat/loki-operator-controller-manager-85b64669c-vc5kk" Feb 16 11:20:25 crc kubenswrapper[4949]: I0216 11:20:25.277493 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"loki-operator-metrics-cert\" (UniqueName: \"kubernetes.io/secret/b0fa6cb2-2288-43e5-bd0a-065e92f72ece-loki-operator-metrics-cert\") pod \"loki-operator-controller-manager-85b64669c-vc5kk\" (UID: \"b0fa6cb2-2288-43e5-bd0a-065e92f72ece\") " pod="openshift-operators-redhat/loki-operator-controller-manager-85b64669c-vc5kk" Feb 16 11:20:25 crc kubenswrapper[4949]: I0216 11:20:25.277587 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manager-config\" (UniqueName: \"kubernetes.io/configmap/b0fa6cb2-2288-43e5-bd0a-065e92f72ece-manager-config\") pod \"loki-operator-controller-manager-85b64669c-vc5kk\" (UID: \"b0fa6cb2-2288-43e5-bd0a-065e92f72ece\") " pod="openshift-operators-redhat/loki-operator-controller-manager-85b64669c-vc5kk" Feb 16 11:20:25 crc kubenswrapper[4949]: I0216 11:20:25.277617 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/b0fa6cb2-2288-43e5-bd0a-065e92f72ece-apiservice-cert\") pod \"loki-operator-controller-manager-85b64669c-vc5kk\" (UID: \"b0fa6cb2-2288-43e5-bd0a-065e92f72ece\") " pod="openshift-operators-redhat/loki-operator-controller-manager-85b64669c-vc5kk" Feb 
16 11:20:25 crc kubenswrapper[4949]: I0216 11:20:25.277871 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/b0fa6cb2-2288-43e5-bd0a-065e92f72ece-webhook-cert\") pod \"loki-operator-controller-manager-85b64669c-vc5kk\" (UID: \"b0fa6cb2-2288-43e5-bd0a-065e92f72ece\") " pod="openshift-operators-redhat/loki-operator-controller-manager-85b64669c-vc5kk" Feb 16 11:20:25 crc kubenswrapper[4949]: I0216 11:20:25.379434 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"loki-operator-metrics-cert\" (UniqueName: \"kubernetes.io/secret/b0fa6cb2-2288-43e5-bd0a-065e92f72ece-loki-operator-metrics-cert\") pod \"loki-operator-controller-manager-85b64669c-vc5kk\" (UID: \"b0fa6cb2-2288-43e5-bd0a-065e92f72ece\") " pod="openshift-operators-redhat/loki-operator-controller-manager-85b64669c-vc5kk" Feb 16 11:20:25 crc kubenswrapper[4949]: I0216 11:20:25.379510 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manager-config\" (UniqueName: \"kubernetes.io/configmap/b0fa6cb2-2288-43e5-bd0a-065e92f72ece-manager-config\") pod \"loki-operator-controller-manager-85b64669c-vc5kk\" (UID: \"b0fa6cb2-2288-43e5-bd0a-065e92f72ece\") " pod="openshift-operators-redhat/loki-operator-controller-manager-85b64669c-vc5kk" Feb 16 11:20:25 crc kubenswrapper[4949]: I0216 11:20:25.379535 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/b0fa6cb2-2288-43e5-bd0a-065e92f72ece-apiservice-cert\") pod \"loki-operator-controller-manager-85b64669c-vc5kk\" (UID: \"b0fa6cb2-2288-43e5-bd0a-065e92f72ece\") " pod="openshift-operators-redhat/loki-operator-controller-manager-85b64669c-vc5kk" Feb 16 11:20:25 crc kubenswrapper[4949]: I0216 11:20:25.379604 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/b0fa6cb2-2288-43e5-bd0a-065e92f72ece-webhook-cert\") pod \"loki-operator-controller-manager-85b64669c-vc5kk\" (UID: \"b0fa6cb2-2288-43e5-bd0a-065e92f72ece\") " pod="openshift-operators-redhat/loki-operator-controller-manager-85b64669c-vc5kk" Feb 16 11:20:25 crc kubenswrapper[4949]: I0216 11:20:25.379628 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sft7f\" (UniqueName: \"kubernetes.io/projected/b0fa6cb2-2288-43e5-bd0a-065e92f72ece-kube-api-access-sft7f\") pod \"loki-operator-controller-manager-85b64669c-vc5kk\" (UID: \"b0fa6cb2-2288-43e5-bd0a-065e92f72ece\") " pod="openshift-operators-redhat/loki-operator-controller-manager-85b64669c-vc5kk" Feb 16 11:20:25 crc kubenswrapper[4949]: I0216 11:20:25.381538 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manager-config\" (UniqueName: \"kubernetes.io/configmap/b0fa6cb2-2288-43e5-bd0a-065e92f72ece-manager-config\") pod \"loki-operator-controller-manager-85b64669c-vc5kk\" (UID: \"b0fa6cb2-2288-43e5-bd0a-065e92f72ece\") " pod="openshift-operators-redhat/loki-operator-controller-manager-85b64669c-vc5kk" Feb 16 11:20:25 crc kubenswrapper[4949]: I0216 11:20:25.388258 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"loki-operator-metrics-cert\" (UniqueName: \"kubernetes.io/secret/b0fa6cb2-2288-43e5-bd0a-065e92f72ece-loki-operator-metrics-cert\") pod \"loki-operator-controller-manager-85b64669c-vc5kk\" (UID: \"b0fa6cb2-2288-43e5-bd0a-065e92f72ece\") " 
pod="openshift-operators-redhat/loki-operator-controller-manager-85b64669c-vc5kk" Feb 16 11:20:25 crc kubenswrapper[4949]: I0216 11:20:25.389015 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/b0fa6cb2-2288-43e5-bd0a-065e92f72ece-apiservice-cert\") pod \"loki-operator-controller-manager-85b64669c-vc5kk\" (UID: \"b0fa6cb2-2288-43e5-bd0a-065e92f72ece\") " pod="openshift-operators-redhat/loki-operator-controller-manager-85b64669c-vc5kk" Feb 16 11:20:25 crc kubenswrapper[4949]: I0216 11:20:25.393456 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/b0fa6cb2-2288-43e5-bd0a-065e92f72ece-webhook-cert\") pod \"loki-operator-controller-manager-85b64669c-vc5kk\" (UID: \"b0fa6cb2-2288-43e5-bd0a-065e92f72ece\") " pod="openshift-operators-redhat/loki-operator-controller-manager-85b64669c-vc5kk" Feb 16 11:20:25 crc kubenswrapper[4949]: I0216 11:20:25.417112 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sft7f\" (UniqueName: \"kubernetes.io/projected/b0fa6cb2-2288-43e5-bd0a-065e92f72ece-kube-api-access-sft7f\") pod \"loki-operator-controller-manager-85b64669c-vc5kk\" (UID: \"b0fa6cb2-2288-43e5-bd0a-065e92f72ece\") " pod="openshift-operators-redhat/loki-operator-controller-manager-85b64669c-vc5kk" Feb 16 11:20:25 crc kubenswrapper[4949]: I0216 11:20:25.505131 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators-redhat/loki-operator-controller-manager-85b64669c-vc5kk" Feb 16 11:20:25 crc kubenswrapper[4949]: I0216 11:20:25.789162 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators-redhat/loki-operator-controller-manager-85b64669c-vc5kk"] Feb 16 11:20:26 crc kubenswrapper[4949]: I0216 11:20:26.029149 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-85b64669c-vc5kk" event={"ID":"b0fa6cb2-2288-43e5-bd0a-065e92f72ece","Type":"ContainerStarted","Data":"f7d4634ea1398bcf7663364ae9dead9d6437efa67cf2552549f33928ea294f14"} Feb 16 11:20:27 crc kubenswrapper[4949]: I0216 11:20:27.486207 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/cluster-logging-operator-c769fd969-wlw2b"] Feb 16 11:20:27 crc kubenswrapper[4949]: I0216 11:20:27.487493 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/cluster-logging-operator-c769fd969-wlw2b" Feb 16 11:20:27 crc kubenswrapper[4949]: I0216 11:20:27.490628 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"cluster-logging-operator-dockercfg-g925k" Feb 16 11:20:27 crc kubenswrapper[4949]: I0216 11:20:27.490705 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"kube-root-ca.crt" Feb 16 11:20:27 crc kubenswrapper[4949]: I0216 11:20:27.490710 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"openshift-service-ca.crt" Feb 16 11:20:27 crc kubenswrapper[4949]: I0216 11:20:27.507074 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/cluster-logging-operator-c769fd969-wlw2b"] Feb 16 11:20:27 crc kubenswrapper[4949]: I0216 11:20:27.511344 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4m5dd\" (UniqueName: \"kubernetes.io/projected/1b22393b-f597-4dad-8eb6-587d7a82e31b-kube-api-access-4m5dd\") pod \"cluster-logging-operator-c769fd969-wlw2b\" (UID: \"1b22393b-f597-4dad-8eb6-587d7a82e31b\") " pod="openshift-logging/cluster-logging-operator-c769fd969-wlw2b" Feb 16 11:20:27 crc kubenswrapper[4949]: I0216 11:20:27.612554 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4m5dd\" (UniqueName: \"kubernetes.io/projected/1b22393b-f597-4dad-8eb6-587d7a82e31b-kube-api-access-4m5dd\") pod \"cluster-logging-operator-c769fd969-wlw2b\" (UID: \"1b22393b-f597-4dad-8eb6-587d7a82e31b\") " pod="openshift-logging/cluster-logging-operator-c769fd969-wlw2b" Feb 16 11:20:27 crc kubenswrapper[4949]: I0216 11:20:27.645189 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4m5dd\" (UniqueName: \"kubernetes.io/projected/1b22393b-f597-4dad-8eb6-587d7a82e31b-kube-api-access-4m5dd\") pod \"cluster-logging-operator-c769fd969-wlw2b\" (UID: \"1b22393b-f597-4dad-8eb6-587d7a82e31b\") " pod="openshift-logging/cluster-logging-operator-c769fd969-wlw2b" Feb 16 11:20:27 crc kubenswrapper[4949]: I0216 11:20:27.814056 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/cluster-logging-operator-c769fd969-wlw2b" Feb 16 11:20:28 crc kubenswrapper[4949]: I0216 11:20:28.527356 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/cluster-logging-operator-c769fd969-wlw2b"] Feb 16 11:20:29 crc kubenswrapper[4949]: I0216 11:20:29.412507 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/cluster-logging-operator-c769fd969-wlw2b" event={"ID":"1b22393b-f597-4dad-8eb6-587d7a82e31b","Type":"ContainerStarted","Data":"1b76d2f4fad96e6e750183f4d7c50a31af7d3201983cafdaf9bc0a519ae56f38"} Feb 16 11:20:31 crc kubenswrapper[4949]: I0216 11:20:31.748864 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-l5qft" Feb 16 11:20:31 crc kubenswrapper[4949]: I0216 11:20:31.808419 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-l5qft" Feb 16 11:20:33 crc kubenswrapper[4949]: I0216 11:20:33.453983 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-85b64669c-vc5kk" event={"ID":"b0fa6cb2-2288-43e5-bd0a-065e92f72ece","Type":"ContainerStarted","Data":"17601884ad2a3e3143f898b0344568dc4b3c113c556041154bc44ccef345eece"} Feb 16 11:20:34 crc kubenswrapper[4949]: I0216 11:20:34.550809 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 16 11:20:34 crc kubenswrapper[4949]: I0216 11:20:34.550860 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 16 11:20:34 crc kubenswrapper[4949]: I0216 11:20:34.550915 4949 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-26lss" Feb 16 11:20:34 crc kubenswrapper[4949]: I0216 11:20:34.551596 4949 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"060f940f61e708f7f4d603618a347c5e6eb0b808f7ca4a5027e6133a9e486da3"} pod="openshift-machine-config-operator/machine-config-daemon-26lss" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 16 11:20:34 crc kubenswrapper[4949]: I0216 11:20:34.551666 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" containerID="cri-o://060f940f61e708f7f4d603618a347c5e6eb0b808f7ca4a5027e6133a9e486da3" gracePeriod=600 Feb 16 11:20:35 crc kubenswrapper[4949]: I0216 11:20:35.167603 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-l5qft"] Feb 16 11:20:35 crc kubenswrapper[4949]: I0216 11:20:35.168591 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-l5qft" podUID="ad125dc9-72e2-4a6d-bc98-3189f7766fbd" containerName="registry-server" 
containerID="cri-o://7f795cf29c1f11f5754de1ca4107fc99c374f7f18013965dff9deb6d9fdfd0a3" gracePeriod=2 Feb 16 11:20:35 crc kubenswrapper[4949]: I0216 11:20:35.476397 4949 generic.go:334] "Generic (PLEG): container finished" podID="ad125dc9-72e2-4a6d-bc98-3189f7766fbd" containerID="7f795cf29c1f11f5754de1ca4107fc99c374f7f18013965dff9deb6d9fdfd0a3" exitCode=0 Feb 16 11:20:35 crc kubenswrapper[4949]: I0216 11:20:35.476495 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l5qft" event={"ID":"ad125dc9-72e2-4a6d-bc98-3189f7766fbd","Type":"ContainerDied","Data":"7f795cf29c1f11f5754de1ca4107fc99c374f7f18013965dff9deb6d9fdfd0a3"} Feb 16 11:20:35 crc kubenswrapper[4949]: I0216 11:20:35.481475 4949 generic.go:334] "Generic (PLEG): container finished" podID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerID="060f940f61e708f7f4d603618a347c5e6eb0b808f7ca4a5027e6133a9e486da3" exitCode=0 Feb 16 11:20:35 crc kubenswrapper[4949]: I0216 11:20:35.481525 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" event={"ID":"39ca5ab7-457c-4404-a3eb-f6acce74843b","Type":"ContainerDied","Data":"060f940f61e708f7f4d603618a347c5e6eb0b808f7ca4a5027e6133a9e486da3"} Feb 16 11:20:35 crc kubenswrapper[4949]: I0216 11:20:35.481574 4949 scope.go:117] "RemoveContainer" containerID="749481410beaac0c07fd7ccbbe2c5d04579b8c026030bdd77c0733972cd5371b" Feb 16 11:20:41 crc kubenswrapper[4949]: I0216 11:20:41.131835 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-l5qft" Feb 16 11:20:41 crc kubenswrapper[4949]: I0216 11:20:41.193737 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad125dc9-72e2-4a6d-bc98-3189f7766fbd-utilities\") pod \"ad125dc9-72e2-4a6d-bc98-3189f7766fbd\" (UID: \"ad125dc9-72e2-4a6d-bc98-3189f7766fbd\") " Feb 16 11:20:41 crc kubenswrapper[4949]: I0216 11:20:41.194035 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad125dc9-72e2-4a6d-bc98-3189f7766fbd-catalog-content\") pod \"ad125dc9-72e2-4a6d-bc98-3189f7766fbd\" (UID: \"ad125dc9-72e2-4a6d-bc98-3189f7766fbd\") " Feb 16 11:20:41 crc kubenswrapper[4949]: I0216 11:20:41.194125 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fhftc\" (UniqueName: \"kubernetes.io/projected/ad125dc9-72e2-4a6d-bc98-3189f7766fbd-kube-api-access-fhftc\") pod \"ad125dc9-72e2-4a6d-bc98-3189f7766fbd\" (UID: \"ad125dc9-72e2-4a6d-bc98-3189f7766fbd\") " Feb 16 11:20:41 crc kubenswrapper[4949]: I0216 11:20:41.194818 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ad125dc9-72e2-4a6d-bc98-3189f7766fbd-utilities" (OuterVolumeSpecName: "utilities") pod "ad125dc9-72e2-4a6d-bc98-3189f7766fbd" (UID: "ad125dc9-72e2-4a6d-bc98-3189f7766fbd"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:20:41 crc kubenswrapper[4949]: I0216 11:20:41.209726 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ad125dc9-72e2-4a6d-bc98-3189f7766fbd-kube-api-access-fhftc" (OuterVolumeSpecName: "kube-api-access-fhftc") pod "ad125dc9-72e2-4a6d-bc98-3189f7766fbd" (UID: "ad125dc9-72e2-4a6d-bc98-3189f7766fbd"). InnerVolumeSpecName "kube-api-access-fhftc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:20:41 crc kubenswrapper[4949]: I0216 11:20:41.295927 4949 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad125dc9-72e2-4a6d-bc98-3189f7766fbd-utilities\") on node \"crc\" DevicePath \"\"" Feb 16 11:20:41 crc kubenswrapper[4949]: I0216 11:20:41.296653 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fhftc\" (UniqueName: \"kubernetes.io/projected/ad125dc9-72e2-4a6d-bc98-3189f7766fbd-kube-api-access-fhftc\") on node \"crc\" DevicePath \"\"" Feb 16 11:20:41 crc kubenswrapper[4949]: I0216 11:20:41.336908 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ad125dc9-72e2-4a6d-bc98-3189f7766fbd-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ad125dc9-72e2-4a6d-bc98-3189f7766fbd" (UID: "ad125dc9-72e2-4a6d-bc98-3189f7766fbd"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:20:41 crc kubenswrapper[4949]: I0216 11:20:41.397837 4949 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad125dc9-72e2-4a6d-bc98-3189f7766fbd-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 16 11:20:41 crc kubenswrapper[4949]: I0216 11:20:41.537650 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-l5qft" Feb 16 11:20:41 crc kubenswrapper[4949]: I0216 11:20:41.537660 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l5qft" event={"ID":"ad125dc9-72e2-4a6d-bc98-3189f7766fbd","Type":"ContainerDied","Data":"177ec17d3e70e24f7ac6e7815bc1faf667f1182ba90747aae91a863eb1123608"} Feb 16 11:20:41 crc kubenswrapper[4949]: I0216 11:20:41.538133 4949 scope.go:117] "RemoveContainer" containerID="7f795cf29c1f11f5754de1ca4107fc99c374f7f18013965dff9deb6d9fdfd0a3" Feb 16 11:20:41 crc kubenswrapper[4949]: I0216 11:20:41.540378 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/cluster-logging-operator-c769fd969-wlw2b" event={"ID":"1b22393b-f597-4dad-8eb6-587d7a82e31b","Type":"ContainerStarted","Data":"7a6c14d0685d0cc776248496e42f9d1a2ae8d8d8c44d3ca7007407e7b373237d"} Feb 16 11:20:41 crc kubenswrapper[4949]: I0216 11:20:41.542997 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-85b64669c-vc5kk" event={"ID":"b0fa6cb2-2288-43e5-bd0a-065e92f72ece","Type":"ContainerStarted","Data":"f11b1fd74ae98c25da9c9e744c53a600e2c373b0cfd477d7fab6cdaa9d9877ac"} Feb 16 11:20:41 crc kubenswrapper[4949]: I0216 11:20:41.544187 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators-redhat/loki-operator-controller-manager-85b64669c-vc5kk" Feb 16 11:20:41 crc kubenswrapper[4949]: I0216 11:20:41.552942 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" event={"ID":"39ca5ab7-457c-4404-a3eb-f6acce74843b","Type":"ContainerStarted","Data":"3be1216f0de04908b66655ea21e2d3a0e3a372ff9aac95cc621972831b9f6c40"} Feb 16 11:20:41 crc kubenswrapper[4949]: I0216 11:20:41.556604 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators-redhat/loki-operator-controller-manager-85b64669c-vc5kk" Feb 16 11:20:41 crc kubenswrapper[4949]: I0216 11:20:41.563201 4949 scope.go:117] 
"RemoveContainer" containerID="c1191064aececc306f73a99811a829415567a303bcc9f58e95bd8ae3686b6eaf" Feb 16 11:20:41 crc kubenswrapper[4949]: I0216 11:20:41.596096 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/cluster-logging-operator-c769fd969-wlw2b" podStartSLOduration=1.9759528259999999 podStartE2EDuration="14.59607544s" podCreationTimestamp="2026-02-16 11:20:27 +0000 UTC" firstStartedPulling="2026-02-16 11:20:28.545769335 +0000 UTC m=+818.175103500" lastFinishedPulling="2026-02-16 11:20:41.165891949 +0000 UTC m=+830.795226114" observedRunningTime="2026-02-16 11:20:41.584577485 +0000 UTC m=+831.213911650" watchObservedRunningTime="2026-02-16 11:20:41.59607544 +0000 UTC m=+831.225409605" Feb 16 11:20:41 crc kubenswrapper[4949]: I0216 11:20:41.620435 4949 scope.go:117] "RemoveContainer" containerID="895587cbeb9fd41c440f5ae30ff2c3ed28f3d401d2fb76ac26c360b1990ea744" Feb 16 11:20:41 crc kubenswrapper[4949]: I0216 11:20:41.623990 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators-redhat/loki-operator-controller-manager-85b64669c-vc5kk" podStartSLOduration=1.253079123 podStartE2EDuration="16.623976248s" podCreationTimestamp="2026-02-16 11:20:25 +0000 UTC" firstStartedPulling="2026-02-16 11:20:25.810887062 +0000 UTC m=+815.440221237" lastFinishedPulling="2026-02-16 11:20:41.181784207 +0000 UTC m=+830.811118362" observedRunningTime="2026-02-16 11:20:41.621743515 +0000 UTC m=+831.251077680" watchObservedRunningTime="2026-02-16 11:20:41.623976248 +0000 UTC m=+831.253310413" Feb 16 11:20:41 crc kubenswrapper[4949]: I0216 11:20:41.694299 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-l5qft"] Feb 16 11:20:41 crc kubenswrapper[4949]: I0216 11:20:41.717038 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-l5qft"] Feb 16 11:20:43 crc kubenswrapper[4949]: I0216 11:20:43.244505 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ad125dc9-72e2-4a6d-bc98-3189f7766fbd" path="/var/lib/kubelet/pods/ad125dc9-72e2-4a6d-bc98-3189f7766fbd/volumes" Feb 16 11:20:46 crc kubenswrapper[4949]: I0216 11:20:46.831593 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["minio-dev/minio"] Feb 16 11:20:46 crc kubenswrapper[4949]: E0216 11:20:46.832790 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad125dc9-72e2-4a6d-bc98-3189f7766fbd" containerName="extract-content" Feb 16 11:20:46 crc kubenswrapper[4949]: I0216 11:20:46.832808 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad125dc9-72e2-4a6d-bc98-3189f7766fbd" containerName="extract-content" Feb 16 11:20:46 crc kubenswrapper[4949]: E0216 11:20:46.832822 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad125dc9-72e2-4a6d-bc98-3189f7766fbd" containerName="registry-server" Feb 16 11:20:46 crc kubenswrapper[4949]: I0216 11:20:46.832828 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad125dc9-72e2-4a6d-bc98-3189f7766fbd" containerName="registry-server" Feb 16 11:20:46 crc kubenswrapper[4949]: E0216 11:20:46.832852 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad125dc9-72e2-4a6d-bc98-3189f7766fbd" containerName="extract-utilities" Feb 16 11:20:46 crc kubenswrapper[4949]: I0216 11:20:46.832858 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad125dc9-72e2-4a6d-bc98-3189f7766fbd" containerName="extract-utilities" Feb 16 11:20:46 crc kubenswrapper[4949]: I0216 
11:20:46.832980 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad125dc9-72e2-4a6d-bc98-3189f7766fbd" containerName="registry-server"
Feb 16 11:20:46 crc kubenswrapper[4949]: I0216 11:20:46.833629 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="minio-dev/minio"
Feb 16 11:20:46 crc kubenswrapper[4949]: I0216 11:20:46.835268 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"minio-dev"/"kube-root-ca.crt"
Feb 16 11:20:46 crc kubenswrapper[4949]: I0216 11:20:46.835578 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"minio-dev"/"openshift-service-ca.crt"
Feb 16 11:20:46 crc kubenswrapper[4949]: I0216 11:20:46.846043 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["minio-dev/minio"]
Feb 16 11:20:46 crc kubenswrapper[4949]: I0216 11:20:46.891730 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xfvfn\" (UniqueName: \"kubernetes.io/projected/ded0cdd6-9b27-4bf5-b2a0-1ca74da7e623-kube-api-access-xfvfn\") pod \"minio\" (UID: \"ded0cdd6-9b27-4bf5-b2a0-1ca74da7e623\") " pod="minio-dev/minio"
Feb 16 11:20:46 crc kubenswrapper[4949]: I0216 11:20:46.891792 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-5654968b-1687-411d-860c-8a08425fa656\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5654968b-1687-411d-860c-8a08425fa656\") pod \"minio\" (UID: \"ded0cdd6-9b27-4bf5-b2a0-1ca74da7e623\") " pod="minio-dev/minio"
Feb 16 11:20:46 crc kubenswrapper[4949]: I0216 11:20:46.993042 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-5654968b-1687-411d-860c-8a08425fa656\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5654968b-1687-411d-860c-8a08425fa656\") pod \"minio\" (UID: \"ded0cdd6-9b27-4bf5-b2a0-1ca74da7e623\") " pod="minio-dev/minio"
Feb 16 11:20:46 crc kubenswrapper[4949]: I0216 11:20:46.993224 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xfvfn\" (UniqueName: \"kubernetes.io/projected/ded0cdd6-9b27-4bf5-b2a0-1ca74da7e623-kube-api-access-xfvfn\") pod \"minio\" (UID: \"ded0cdd6-9b27-4bf5-b2a0-1ca74da7e623\") " pod="minio-dev/minio"
Feb 16 11:20:46 crc kubenswrapper[4949]: I0216 11:20:46.996235 4949 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Feb 16 11:20:46 crc kubenswrapper[4949]: I0216 11:20:46.996272 4949 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-5654968b-1687-411d-860c-8a08425fa656\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5654968b-1687-411d-860c-8a08425fa656\") pod \"minio\" (UID: \"ded0cdd6-9b27-4bf5-b2a0-1ca74da7e623\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/6f684fa093f9cf543811583ac377f303c323996a25a34a54757fcfeb4557dad4/globalmount\"" pod="minio-dev/minio"
Feb 16 11:20:47 crc kubenswrapper[4949]: I0216 11:20:47.019659 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xfvfn\" (UniqueName: \"kubernetes.io/projected/ded0cdd6-9b27-4bf5-b2a0-1ca74da7e623-kube-api-access-xfvfn\") pod \"minio\" (UID: \"ded0cdd6-9b27-4bf5-b2a0-1ca74da7e623\") " pod="minio-dev/minio"
Feb 16 11:20:47 crc kubenswrapper[4949]: I0216 11:20:47.024954 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-5654968b-1687-411d-860c-8a08425fa656\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5654968b-1687-411d-860c-8a08425fa656\") pod \"minio\" (UID: \"ded0cdd6-9b27-4bf5-b2a0-1ca74da7e623\") " pod="minio-dev/minio"
Feb 16 11:20:47 crc kubenswrapper[4949]: I0216 11:20:47.151915 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="minio-dev/minio"
Feb 16 11:20:47 crc kubenswrapper[4949]: I0216 11:20:47.372573 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["minio-dev/minio"]
Feb 16 11:20:47 crc kubenswrapper[4949]: W0216 11:20:47.376180 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podded0cdd6_9b27_4bf5_b2a0_1ca74da7e623.slice/crio-519a74884ec188447834cf6c0923ea7869fc0cfdf4bdd84bb927a631fc308d2f WatchSource:0}: Error finding container 519a74884ec188447834cf6c0923ea7869fc0cfdf4bdd84bb927a631fc308d2f: Status 404 returned error can't find the container with id 519a74884ec188447834cf6c0923ea7869fc0cfdf4bdd84bb927a631fc308d2f
Feb 16 11:20:47 crc kubenswrapper[4949]: I0216 11:20:47.594136 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="minio-dev/minio" event={"ID":"ded0cdd6-9b27-4bf5-b2a0-1ca74da7e623","Type":"ContainerStarted","Data":"519a74884ec188447834cf6c0923ea7869fc0cfdf4bdd84bb927a631fc308d2f"}
Feb 16 11:20:51 crc kubenswrapper[4949]: I0216 11:20:51.627253 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="minio-dev/minio" event={"ID":"ded0cdd6-9b27-4bf5-b2a0-1ca74da7e623","Type":"ContainerStarted","Data":"3210a0fa4b0ad5e0f191b08481ff1f1be3659a24cd817a881605d7204581edb5"}
Feb 16 11:20:51 crc kubenswrapper[4949]: I0216 11:20:51.651006 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="minio-dev/minio" podStartSLOduration=4.461085986 podStartE2EDuration="7.650986513s" podCreationTimestamp="2026-02-16 11:20:44 +0000 UTC" firstStartedPulling="2026-02-16 11:20:47.379054883 +0000 UTC m=+837.008389048" lastFinishedPulling="2026-02-16 11:20:50.56895541 +0000 UTC m=+840.198289575" observedRunningTime="2026-02-16 11:20:51.645495698 +0000 UTC m=+841.274829873" watchObservedRunningTime="2026-02-16 11:20:51.650986513 +0000 UTC m=+841.280320678"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.530706 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-distributor-5d5548c9f5-dl75n"]
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.533074 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-distributor-5d5548c9f5-dl75n"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.535911 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-ca-bundle"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.536460 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-distributor-http"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.536694 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-config"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.536820 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-distributor-grpc"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.551691 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-distributor-5d5548c9f5-dl75n"]
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.562340 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-dockercfg-wn9kl"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.653987 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/52cb84ef-5333-451e-aa69-7af33124627b-config\") pod \"logging-loki-distributor-5d5548c9f5-dl75n\" (UID: \"52cb84ef-5333-451e-aa69-7af33124627b\") " pod="openshift-logging/logging-loki-distributor-5d5548c9f5-dl75n"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.654642 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2vn4s\" (UniqueName: \"kubernetes.io/projected/52cb84ef-5333-451e-aa69-7af33124627b-kube-api-access-2vn4s\") pod \"logging-loki-distributor-5d5548c9f5-dl75n\" (UID: \"52cb84ef-5333-451e-aa69-7af33124627b\") " pod="openshift-logging/logging-loki-distributor-5d5548c9f5-dl75n"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.654689 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-distributor-grpc\" (UniqueName: \"kubernetes.io/secret/52cb84ef-5333-451e-aa69-7af33124627b-logging-loki-distributor-grpc\") pod \"logging-loki-distributor-5d5548c9f5-dl75n\" (UID: \"52cb84ef-5333-451e-aa69-7af33124627b\") " pod="openshift-logging/logging-loki-distributor-5d5548c9f5-dl75n"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.654736 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/52cb84ef-5333-451e-aa69-7af33124627b-logging-loki-ca-bundle\") pod \"logging-loki-distributor-5d5548c9f5-dl75n\" (UID: \"52cb84ef-5333-451e-aa69-7af33124627b\") " pod="openshift-logging/logging-loki-distributor-5d5548c9f5-dl75n"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.654769 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-distributor-http\" (UniqueName: \"kubernetes.io/secret/52cb84ef-5333-451e-aa69-7af33124627b-logging-loki-distributor-http\") pod \"logging-loki-distributor-5d5548c9f5-dl75n\" (UID: \"52cb84ef-5333-451e-aa69-7af33124627b\") " pod="openshift-logging/logging-loki-distributor-5d5548c9f5-dl75n"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.690798 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-querier-76bf7b6d45-gcct7"]
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.696044 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-querier-76bf7b6d45-gcct7"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.700745 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-s3"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.700998 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-querier-grpc"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.701060 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-querier-http"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.709900 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-querier-76bf7b6d45-gcct7"]
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.757602 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/52cb84ef-5333-451e-aa69-7af33124627b-config\") pod \"logging-loki-distributor-5d5548c9f5-dl75n\" (UID: \"52cb84ef-5333-451e-aa69-7af33124627b\") " pod="openshift-logging/logging-loki-distributor-5d5548c9f5-dl75n"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.757660 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2vn4s\" (UniqueName: \"kubernetes.io/projected/52cb84ef-5333-451e-aa69-7af33124627b-kube-api-access-2vn4s\") pod \"logging-loki-distributor-5d5548c9f5-dl75n\" (UID: \"52cb84ef-5333-451e-aa69-7af33124627b\") " pod="openshift-logging/logging-loki-distributor-5d5548c9f5-dl75n"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.757701 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-distributor-grpc\" (UniqueName: \"kubernetes.io/secret/52cb84ef-5333-451e-aa69-7af33124627b-logging-loki-distributor-grpc\") pod \"logging-loki-distributor-5d5548c9f5-dl75n\" (UID: \"52cb84ef-5333-451e-aa69-7af33124627b\") " pod="openshift-logging/logging-loki-distributor-5d5548c9f5-dl75n"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.757736 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/52cb84ef-5333-451e-aa69-7af33124627b-logging-loki-ca-bundle\") pod \"logging-loki-distributor-5d5548c9f5-dl75n\" (UID: \"52cb84ef-5333-451e-aa69-7af33124627b\") " pod="openshift-logging/logging-loki-distributor-5d5548c9f5-dl75n"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.757762 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-distributor-http\" (UniqueName: \"kubernetes.io/secret/52cb84ef-5333-451e-aa69-7af33124627b-logging-loki-distributor-http\") pod \"logging-loki-distributor-5d5548c9f5-dl75n\" (UID: \"52cb84ef-5333-451e-aa69-7af33124627b\") " pod="openshift-logging/logging-loki-distributor-5d5548c9f5-dl75n"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.764200 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/52cb84ef-5333-451e-aa69-7af33124627b-config\") pod \"logging-loki-distributor-5d5548c9f5-dl75n\" (UID: \"52cb84ef-5333-451e-aa69-7af33124627b\") " pod="openshift-logging/logging-loki-distributor-5d5548c9f5-dl75n"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.769805 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/52cb84ef-5333-451e-aa69-7af33124627b-logging-loki-ca-bundle\") pod \"logging-loki-distributor-5d5548c9f5-dl75n\" (UID: \"52cb84ef-5333-451e-aa69-7af33124627b\") " pod="openshift-logging/logging-loki-distributor-5d5548c9f5-dl75n"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.785788 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-distributor-http\" (UniqueName: \"kubernetes.io/secret/52cb84ef-5333-451e-aa69-7af33124627b-logging-loki-distributor-http\") pod \"logging-loki-distributor-5d5548c9f5-dl75n\" (UID: \"52cb84ef-5333-451e-aa69-7af33124627b\") " pod="openshift-logging/logging-loki-distributor-5d5548c9f5-dl75n"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.787881 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-distributor-grpc\" (UniqueName: \"kubernetes.io/secret/52cb84ef-5333-451e-aa69-7af33124627b-logging-loki-distributor-grpc\") pod \"logging-loki-distributor-5d5548c9f5-dl75n\" (UID: \"52cb84ef-5333-451e-aa69-7af33124627b\") " pod="openshift-logging/logging-loki-distributor-5d5548c9f5-dl75n"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.796845 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2vn4s\" (UniqueName: \"kubernetes.io/projected/52cb84ef-5333-451e-aa69-7af33124627b-kube-api-access-2vn4s\") pod \"logging-loki-distributor-5d5548c9f5-dl75n\" (UID: \"52cb84ef-5333-451e-aa69-7af33124627b\") " pod="openshift-logging/logging-loki-distributor-5d5548c9f5-dl75n"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.836151 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-query-frontend-6d6859c548-8h6lw"]
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.837063 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-query-frontend-6d6859c548-8h6lw"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.842802 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-query-frontend-grpc"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.845233 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-query-frontend-6d6859c548-8h6lw"]
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.845581 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-query-frontend-http"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.862502 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-querier-grpc\" (UniqueName: \"kubernetes.io/secret/c8fca297-202a-44af-81f7-ecab29bc0472-logging-loki-querier-grpc\") pod \"logging-loki-querier-76bf7b6d45-gcct7\" (UID: \"c8fca297-202a-44af-81f7-ecab29bc0472\") " pod="openshift-logging/logging-loki-querier-76bf7b6d45-gcct7"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.862704 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-querier-http\" (UniqueName: \"kubernetes.io/secret/c8fca297-202a-44af-81f7-ecab29bc0472-logging-loki-querier-http\") pod \"logging-loki-querier-76bf7b6d45-gcct7\" (UID: \"c8fca297-202a-44af-81f7-ecab29bc0472\") " pod="openshift-logging/logging-loki-querier-76bf7b6d45-gcct7"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.862824 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zz8gl\" (UniqueName: \"kubernetes.io/projected/c8fca297-202a-44af-81f7-ecab29bc0472-kube-api-access-zz8gl\") pod \"logging-loki-querier-76bf7b6d45-gcct7\" (UID: \"c8fca297-202a-44af-81f7-ecab29bc0472\") " pod="openshift-logging/logging-loki-querier-76bf7b6d45-gcct7"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.862852 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c8fca297-202a-44af-81f7-ecab29bc0472-logging-loki-ca-bundle\") pod \"logging-loki-querier-76bf7b6d45-gcct7\" (UID: \"c8fca297-202a-44af-81f7-ecab29bc0472\") " pod="openshift-logging/logging-loki-querier-76bf7b6d45-gcct7"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.863015 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/c8fca297-202a-44af-81f7-ecab29bc0472-logging-loki-s3\") pod \"logging-loki-querier-76bf7b6d45-gcct7\" (UID: \"c8fca297-202a-44af-81f7-ecab29bc0472\") " pod="openshift-logging/logging-loki-querier-76bf7b6d45-gcct7"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.863042 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c8fca297-202a-44af-81f7-ecab29bc0472-config\") pod \"logging-loki-querier-76bf7b6d45-gcct7\" (UID: \"c8fca297-202a-44af-81f7-ecab29bc0472\") " pod="openshift-logging/logging-loki-querier-76bf7b6d45-gcct7"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.863426 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-distributor-5d5548c9f5-dl75n"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.959237 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-gateway-7d9d97666-94s4k"]
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.961036 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-gateway-7d9d97666-94s4k"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.964252 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-query-frontend-grpc\" (UniqueName: \"kubernetes.io/secret/27b4f51a-e116-4a53-adec-61ef733835ca-logging-loki-query-frontend-grpc\") pod \"logging-loki-query-frontend-6d6859c548-8h6lw\" (UID: \"27b4f51a-e116-4a53-adec-61ef733835ca\") " pod="openshift-logging/logging-loki-query-frontend-6d6859c548-8h6lw"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.964365 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-querier-grpc\" (UniqueName: \"kubernetes.io/secret/c8fca297-202a-44af-81f7-ecab29bc0472-logging-loki-querier-grpc\") pod \"logging-loki-querier-76bf7b6d45-gcct7\" (UID: \"c8fca297-202a-44af-81f7-ecab29bc0472\") " pod="openshift-logging/logging-loki-querier-76bf7b6d45-gcct7"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.964389 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/27b4f51a-e116-4a53-adec-61ef733835ca-logging-loki-ca-bundle\") pod \"logging-loki-query-frontend-6d6859c548-8h6lw\" (UID: \"27b4f51a-e116-4a53-adec-61ef733835ca\") " pod="openshift-logging/logging-loki-query-frontend-6d6859c548-8h6lw"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.964426 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-query-frontend-http\" (UniqueName: \"kubernetes.io/secret/27b4f51a-e116-4a53-adec-61ef733835ca-logging-loki-query-frontend-http\") pod \"logging-loki-query-frontend-6d6859c548-8h6lw\" (UID: \"27b4f51a-e116-4a53-adec-61ef733835ca\") " pod="openshift-logging/logging-loki-query-frontend-6d6859c548-8h6lw"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.964448 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-querier-http\" (UniqueName: \"kubernetes.io/secret/c8fca297-202a-44af-81f7-ecab29bc0472-logging-loki-querier-http\") pod \"logging-loki-querier-76bf7b6d45-gcct7\" (UID: \"c8fca297-202a-44af-81f7-ecab29bc0472\") " pod="openshift-logging/logging-loki-querier-76bf7b6d45-gcct7"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.964488 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zz8gl\" (UniqueName: \"kubernetes.io/projected/c8fca297-202a-44af-81f7-ecab29bc0472-kube-api-access-zz8gl\") pod \"logging-loki-querier-76bf7b6d45-gcct7\" (UID: \"c8fca297-202a-44af-81f7-ecab29bc0472\") " pod="openshift-logging/logging-loki-querier-76bf7b6d45-gcct7"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.964510 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c8fca297-202a-44af-81f7-ecab29bc0472-logging-loki-ca-bundle\") pod \"logging-loki-querier-76bf7b6d45-gcct7\" (UID: \"c8fca297-202a-44af-81f7-ecab29bc0472\") " pod="openshift-logging/logging-loki-querier-76bf7b6d45-gcct7"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.964539 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kffzr\" (UniqueName: \"kubernetes.io/projected/27b4f51a-e116-4a53-adec-61ef733835ca-kube-api-access-kffzr\") pod \"logging-loki-query-frontend-6d6859c548-8h6lw\" (UID: \"27b4f51a-e116-4a53-adec-61ef733835ca\") " pod="openshift-logging/logging-loki-query-frontend-6d6859c548-8h6lw"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.964750 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/c8fca297-202a-44af-81f7-ecab29bc0472-logging-loki-s3\") pod \"logging-loki-querier-76bf7b6d45-gcct7\" (UID: \"c8fca297-202a-44af-81f7-ecab29bc0472\") " pod="openshift-logging/logging-loki-querier-76bf7b6d45-gcct7"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.964785 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/27b4f51a-e116-4a53-adec-61ef733835ca-config\") pod \"logging-loki-query-frontend-6d6859c548-8h6lw\" (UID: \"27b4f51a-e116-4a53-adec-61ef733835ca\") " pod="openshift-logging/logging-loki-query-frontend-6d6859c548-8h6lw"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.964827 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c8fca297-202a-44af-81f7-ecab29bc0472-config\") pod \"logging-loki-querier-76bf7b6d45-gcct7\" (UID: \"c8fca297-202a-44af-81f7-ecab29bc0472\") " pod="openshift-logging/logging-loki-querier-76bf7b6d45-gcct7"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.966807 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c8fca297-202a-44af-81f7-ecab29bc0472-config\") pod \"logging-loki-querier-76bf7b6d45-gcct7\" (UID: \"c8fca297-202a-44af-81f7-ecab29bc0472\") " pod="openshift-logging/logging-loki-querier-76bf7b6d45-gcct7"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.967657 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c8fca297-202a-44af-81f7-ecab29bc0472-logging-loki-ca-bundle\") pod \"logging-loki-querier-76bf7b6d45-gcct7\" (UID: \"c8fca297-202a-44af-81f7-ecab29bc0472\") " pod="openshift-logging/logging-loki-querier-76bf7b6d45-gcct7"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.968022 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.968825 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-gateway-ca-bundle"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.968973 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway-http"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.969138 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway-client-http"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.971778 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-gateway-7d9d97666-nk5qs"]
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.973791 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-gateway-7d9d97666-nk5qs"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.975945 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/c8fca297-202a-44af-81f7-ecab29bc0472-logging-loki-s3\") pod \"logging-loki-querier-76bf7b6d45-gcct7\" (UID: \"c8fca297-202a-44af-81f7-ecab29bc0472\") " pod="openshift-logging/logging-loki-querier-76bf7b6d45-gcct7"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.976252 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-gateway"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.990674 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-querier-grpc\" (UniqueName: \"kubernetes.io/secret/c8fca297-202a-44af-81f7-ecab29bc0472-logging-loki-querier-grpc\") pod \"logging-loki-querier-76bf7b6d45-gcct7\" (UID: \"c8fca297-202a-44af-81f7-ecab29bc0472\") " pod="openshift-logging/logging-loki-querier-76bf7b6d45-gcct7"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.991206 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-querier-http\" (UniqueName: \"kubernetes.io/secret/c8fca297-202a-44af-81f7-ecab29bc0472-logging-loki-querier-http\") pod \"logging-loki-querier-76bf7b6d45-gcct7\" (UID: \"c8fca297-202a-44af-81f7-ecab29bc0472\") " pod="openshift-logging/logging-loki-querier-76bf7b6d45-gcct7"
Feb 16 11:20:57 crc kubenswrapper[4949]: I0216 11:20:57.994333 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zz8gl\" (UniqueName: \"kubernetes.io/projected/c8fca297-202a-44af-81f7-ecab29bc0472-kube-api-access-zz8gl\") pod \"logging-loki-querier-76bf7b6d45-gcct7\" (UID: \"c8fca297-202a-44af-81f7-ecab29bc0472\") " pod="openshift-logging/logging-loki-querier-76bf7b6d45-gcct7"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.005042 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-7d9d97666-94s4k"]
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.005322 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway-dockercfg-2srqz"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.014503 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-querier-76bf7b6d45-gcct7"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.015047 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-7d9d97666-nk5qs"]
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.067780 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/46c6e5be-0462-40bf-ab5e-f052cb9163b6-lokistack-gateway\") pod \"logging-loki-gateway-7d9d97666-nk5qs\" (UID: \"46c6e5be-0462-40bf-ab5e-f052cb9163b6\") " pod="openshift-logging/logging-loki-gateway-7d9d97666-nk5qs"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.067842 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4c4cf393-1f34-415d-bd8a-2cd87dc62593-logging-loki-ca-bundle\") pod \"logging-loki-gateway-7d9d97666-94s4k\" (UID: \"4c4cf393-1f34-415d-bd8a-2cd87dc62593\") " pod="openshift-logging/logging-loki-gateway-7d9d97666-94s4k"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.067869 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/27b4f51a-e116-4a53-adec-61ef733835ca-config\") pod \"logging-loki-query-frontend-6d6859c548-8h6lw\" (UID: \"27b4f51a-e116-4a53-adec-61ef733835ca\") " pod="openshift-logging/logging-loki-query-frontend-6d6859c548-8h6lw"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.067889 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4j5lt\" (UniqueName: \"kubernetes.io/projected/4c4cf393-1f34-415d-bd8a-2cd87dc62593-kube-api-access-4j5lt\") pod \"logging-loki-gateway-7d9d97666-94s4k\" (UID: \"4c4cf393-1f34-415d-bd8a-2cd87dc62593\") " pod="openshift-logging/logging-loki-gateway-7d9d97666-94s4k"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.067916 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-query-frontend-grpc\" (UniqueName: \"kubernetes.io/secret/27b4f51a-e116-4a53-adec-61ef733835ca-logging-loki-query-frontend-grpc\") pod \"logging-loki-query-frontend-6d6859c548-8h6lw\" (UID: \"27b4f51a-e116-4a53-adec-61ef733835ca\") " pod="openshift-logging/logging-loki-query-frontend-6d6859c548-8h6lw"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.068064 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/4c4cf393-1f34-415d-bd8a-2cd87dc62593-tenants\") pod \"logging-loki-gateway-7d9d97666-94s4k\" (UID: \"4c4cf393-1f34-415d-bd8a-2cd87dc62593\") " pod="openshift-logging/logging-loki-gateway-7d9d97666-94s4k"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.068085 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/27b4f51a-e116-4a53-adec-61ef733835ca-logging-loki-ca-bundle\") pod \"logging-loki-query-frontend-6d6859c548-8h6lw\" (UID: \"27b4f51a-e116-4a53-adec-61ef733835ca\") " pod="openshift-logging/logging-loki-query-frontend-6d6859c548-8h6lw"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.068110 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/46c6e5be-0462-40bf-ab5e-f052cb9163b6-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-7d9d97666-nk5qs\" (UID: \"46c6e5be-0462-40bf-ab5e-f052cb9163b6\") " pod="openshift-logging/logging-loki-gateway-7d9d97666-nk5qs"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.068128 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/4c4cf393-1f34-415d-bd8a-2cd87dc62593-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-7d9d97666-94s4k\" (UID: \"4c4cf393-1f34-415d-bd8a-2cd87dc62593\") " pod="openshift-logging/logging-loki-gateway-7d9d97666-94s4k"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.068146 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-query-frontend-http\" (UniqueName: \"kubernetes.io/secret/27b4f51a-e116-4a53-adec-61ef733835ca-logging-loki-query-frontend-http\") pod \"logging-loki-query-frontend-6d6859c548-8h6lw\" (UID: \"27b4f51a-e116-4a53-adec-61ef733835ca\") " pod="openshift-logging/logging-loki-query-frontend-6d6859c548-8h6lw"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.068161 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/4c4cf393-1f34-415d-bd8a-2cd87dc62593-lokistack-gateway\") pod \"logging-loki-gateway-7d9d97666-94s4k\" (UID: \"4c4cf393-1f34-415d-bd8a-2cd87dc62593\") " pod="openshift-logging/logging-loki-gateway-7d9d97666-94s4k"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.068201 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4c4cf393-1f34-415d-bd8a-2cd87dc62593-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-7d9d97666-94s4k\" (UID: \"4c4cf393-1f34-415d-bd8a-2cd87dc62593\") " pod="openshift-logging/logging-loki-gateway-7d9d97666-94s4k"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.068218 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/46c6e5be-0462-40bf-ab5e-f052cb9163b6-rbac\") pod \"logging-loki-gateway-7d9d97666-nk5qs\" (UID: \"46c6e5be-0462-40bf-ab5e-f052cb9163b6\") " pod="openshift-logging/logging-loki-gateway-7d9d97666-nk5qs"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.068233 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/46c6e5be-0462-40bf-ab5e-f052cb9163b6-logging-loki-ca-bundle\") pod \"logging-loki-gateway-7d9d97666-nk5qs\" (UID: \"46c6e5be-0462-40bf-ab5e-f052cb9163b6\") " pod="openshift-logging/logging-loki-gateway-7d9d97666-nk5qs"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.068251 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/46c6e5be-0462-40bf-ab5e-f052cb9163b6-tenants\") pod \"logging-loki-gateway-7d9d97666-nk5qs\" (UID: \"46c6e5be-0462-40bf-ab5e-f052cb9163b6\") " pod="openshift-logging/logging-loki-gateway-7d9d97666-nk5qs"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.068292 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/46c6e5be-0462-40bf-ab5e-f052cb9163b6-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-7d9d97666-nk5qs\" (UID: \"46c6e5be-0462-40bf-ab5e-f052cb9163b6\") " pod="openshift-logging/logging-loki-gateway-7d9d97666-nk5qs"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.068313 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/46c6e5be-0462-40bf-ab5e-f052cb9163b6-tls-secret\") pod \"logging-loki-gateway-7d9d97666-nk5qs\" (UID: \"46c6e5be-0462-40bf-ab5e-f052cb9163b6\") " pod="openshift-logging/logging-loki-gateway-7d9d97666-nk5qs"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.068331 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kffzr\" (UniqueName: \"kubernetes.io/projected/27b4f51a-e116-4a53-adec-61ef733835ca-kube-api-access-kffzr\") pod \"logging-loki-query-frontend-6d6859c548-8h6lw\" (UID: \"27b4f51a-e116-4a53-adec-61ef733835ca\") " pod="openshift-logging/logging-loki-query-frontend-6d6859c548-8h6lw"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.068349 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/4c4cf393-1f34-415d-bd8a-2cd87dc62593-tls-secret\") pod \"logging-loki-gateway-7d9d97666-94s4k\" (UID: \"4c4cf393-1f34-415d-bd8a-2cd87dc62593\") " pod="openshift-logging/logging-loki-gateway-7d9d97666-94s4k"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.068366 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gkjmn\" (UniqueName: \"kubernetes.io/projected/46c6e5be-0462-40bf-ab5e-f052cb9163b6-kube-api-access-gkjmn\") pod \"logging-loki-gateway-7d9d97666-nk5qs\" (UID: \"46c6e5be-0462-40bf-ab5e-f052cb9163b6\") " pod="openshift-logging/logging-loki-gateway-7d9d97666-nk5qs"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.068381 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/4c4cf393-1f34-415d-bd8a-2cd87dc62593-rbac\") pod \"logging-loki-gateway-7d9d97666-94s4k\" (UID: \"4c4cf393-1f34-415d-bd8a-2cd87dc62593\") " pod="openshift-logging/logging-loki-gateway-7d9d97666-94s4k"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.069812 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/27b4f51a-e116-4a53-adec-61ef733835ca-config\") pod \"logging-loki-query-frontend-6d6859c548-8h6lw\" (UID: \"27b4f51a-e116-4a53-adec-61ef733835ca\") " pod="openshift-logging/logging-loki-query-frontend-6d6859c548-8h6lw"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.070709 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/27b4f51a-e116-4a53-adec-61ef733835ca-logging-loki-ca-bundle\") pod \"logging-loki-query-frontend-6d6859c548-8h6lw\" (UID: \"27b4f51a-e116-4a53-adec-61ef733835ca\") " pod="openshift-logging/logging-loki-query-frontend-6d6859c548-8h6lw"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.088940 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-query-frontend-grpc\" (UniqueName: \"kubernetes.io/secret/27b4f51a-e116-4a53-adec-61ef733835ca-logging-loki-query-frontend-grpc\") pod \"logging-loki-query-frontend-6d6859c548-8h6lw\" (UID: \"27b4f51a-e116-4a53-adec-61ef733835ca\") " pod="openshift-logging/logging-loki-query-frontend-6d6859c548-8h6lw"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.089367 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-query-frontend-http\" (UniqueName: \"kubernetes.io/secret/27b4f51a-e116-4a53-adec-61ef733835ca-logging-loki-query-frontend-http\") pod \"logging-loki-query-frontend-6d6859c548-8h6lw\" (UID: \"27b4f51a-e116-4a53-adec-61ef733835ca\") " pod="openshift-logging/logging-loki-query-frontend-6d6859c548-8h6lw"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.096184 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kffzr\" (UniqueName: \"kubernetes.io/projected/27b4f51a-e116-4a53-adec-61ef733835ca-kube-api-access-kffzr\") pod \"logging-loki-query-frontend-6d6859c548-8h6lw\" (UID: \"27b4f51a-e116-4a53-adec-61ef733835ca\") " pod="openshift-logging/logging-loki-query-frontend-6d6859c548-8h6lw"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.155553 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-query-frontend-6d6859c548-8h6lw"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.171291 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/4c4cf393-1f34-415d-bd8a-2cd87dc62593-tenants\") pod \"logging-loki-gateway-7d9d97666-94s4k\" (UID: \"4c4cf393-1f34-415d-bd8a-2cd87dc62593\") " pod="openshift-logging/logging-loki-gateway-7d9d97666-94s4k"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.171346 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/4c4cf393-1f34-415d-bd8a-2cd87dc62593-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-7d9d97666-94s4k\" (UID: \"4c4cf393-1f34-415d-bd8a-2cd87dc62593\") " pod="openshift-logging/logging-loki-gateway-7d9d97666-94s4k"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.171371 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/46c6e5be-0462-40bf-ab5e-f052cb9163b6-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-7d9d97666-nk5qs\" (UID: \"46c6e5be-0462-40bf-ab5e-f052cb9163b6\") " pod="openshift-logging/logging-loki-gateway-7d9d97666-nk5qs"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.171393 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/4c4cf393-1f34-415d-bd8a-2cd87dc62593-lokistack-gateway\") pod \"logging-loki-gateway-7d9d97666-94s4k\" (UID: \"4c4cf393-1f34-415d-bd8a-2cd87dc62593\") " pod="openshift-logging/logging-loki-gateway-7d9d97666-94s4k"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.171415 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4c4cf393-1f34-415d-bd8a-2cd87dc62593-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-7d9d97666-94s4k\" (UID: \"4c4cf393-1f34-415d-bd8a-2cd87dc62593\") " pod="openshift-logging/logging-loki-gateway-7d9d97666-94s4k"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.171434 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/46c6e5be-0462-40bf-ab5e-f052cb9163b6-rbac\") pod \"logging-loki-gateway-7d9d97666-nk5qs\" (UID: \"46c6e5be-0462-40bf-ab5e-f052cb9163b6\") " pod="openshift-logging/logging-loki-gateway-7d9d97666-nk5qs"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.171450 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/46c6e5be-0462-40bf-ab5e-f052cb9163b6-logging-loki-ca-bundle\") pod \"logging-loki-gateway-7d9d97666-nk5qs\" (UID: \"46c6e5be-0462-40bf-ab5e-f052cb9163b6\") " pod="openshift-logging/logging-loki-gateway-7d9d97666-nk5qs"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.171466 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/46c6e5be-0462-40bf-ab5e-f052cb9163b6-tenants\") pod \"logging-loki-gateway-7d9d97666-nk5qs\" (UID: \"46c6e5be-0462-40bf-ab5e-f052cb9163b6\") " pod="openshift-logging/logging-loki-gateway-7d9d97666-nk5qs"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.171498 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/46c6e5be-0462-40bf-ab5e-f052cb9163b6-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-7d9d97666-nk5qs\" (UID: \"46c6e5be-0462-40bf-ab5e-f052cb9163b6\") " pod="openshift-logging/logging-loki-gateway-7d9d97666-nk5qs"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.171518 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/46c6e5be-0462-40bf-ab5e-f052cb9163b6-tls-secret\") pod \"logging-loki-gateway-7d9d97666-nk5qs\" (UID: \"46c6e5be-0462-40bf-ab5e-f052cb9163b6\") " pod="openshift-logging/logging-loki-gateway-7d9d97666-nk5qs"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.171543 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/4c4cf393-1f34-415d-bd8a-2cd87dc62593-tls-secret\") pod \"logging-loki-gateway-7d9d97666-94s4k\" (UID: \"4c4cf393-1f34-415d-bd8a-2cd87dc62593\") " pod="openshift-logging/logging-loki-gateway-7d9d97666-94s4k"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.171562 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gkjmn\" (UniqueName: \"kubernetes.io/projected/46c6e5be-0462-40bf-ab5e-f052cb9163b6-kube-api-access-gkjmn\") pod \"logging-loki-gateway-7d9d97666-nk5qs\" (UID: \"46c6e5be-0462-40bf-ab5e-f052cb9163b6\") " pod="openshift-logging/logging-loki-gateway-7d9d97666-nk5qs"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.171577 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/4c4cf393-1f34-415d-bd8a-2cd87dc62593-rbac\") pod \"logging-loki-gateway-7d9d97666-94s4k\" (UID: \"4c4cf393-1f34-415d-bd8a-2cd87dc62593\") " pod="openshift-logging/logging-loki-gateway-7d9d97666-94s4k"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.171602 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/46c6e5be-0462-40bf-ab5e-f052cb9163b6-lokistack-gateway\") pod \"logging-loki-gateway-7d9d97666-nk5qs\" (UID: \"46c6e5be-0462-40bf-ab5e-f052cb9163b6\") " pod="openshift-logging/logging-loki-gateway-7d9d97666-nk5qs"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.171633 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4c4cf393-1f34-415d-bd8a-2cd87dc62593-logging-loki-ca-bundle\") pod \"logging-loki-gateway-7d9d97666-94s4k\" (UID: \"4c4cf393-1f34-415d-bd8a-2cd87dc62593\") " pod="openshift-logging/logging-loki-gateway-7d9d97666-94s4k"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.171651 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4j5lt\" (UniqueName: \"kubernetes.io/projected/4c4cf393-1f34-415d-bd8a-2cd87dc62593-kube-api-access-4j5lt\") pod \"logging-loki-gateway-7d9d97666-94s4k\" (UID: \"4c4cf393-1f34-415d-bd8a-2cd87dc62593\") " pod="openshift-logging/logging-loki-gateway-7d9d97666-94s4k"
Feb 16 11:20:58 crc kubenswrapper[4949]: E0216 11:20:58.175003 4949 secret.go:188] Couldn't get secret openshift-logging/logging-loki-gateway-http: secret "logging-loki-gateway-http" not found
Feb 16 11:20:58 crc kubenswrapper[4949]: E0216 11:20:58.175073 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/46c6e5be-0462-40bf-ab5e-f052cb9163b6-tls-secret podName:46c6e5be-0462-40bf-ab5e-f052cb9163b6 nodeName:}" failed. No retries permitted until 2026-02-16 11:20:58.675052615 +0000 UTC m=+848.304386790 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "tls-secret" (UniqueName: "kubernetes.io/secret/46c6e5be-0462-40bf-ab5e-f052cb9163b6-tls-secret") pod "logging-loki-gateway-7d9d97666-nk5qs" (UID: "46c6e5be-0462-40bf-ab5e-f052cb9163b6") : secret "logging-loki-gateway-http" not found
Feb 16 11:20:58 crc kubenswrapper[4949]: E0216 11:20:58.175106 4949 secret.go:188] Couldn't get secret openshift-logging/logging-loki-gateway-http: secret "logging-loki-gateway-http" not found
Feb 16 11:20:58 crc kubenswrapper[4949]: E0216 11:20:58.175170 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4c4cf393-1f34-415d-bd8a-2cd87dc62593-tls-secret podName:4c4cf393-1f34-415d-bd8a-2cd87dc62593 nodeName:}" failed. No retries permitted until 2026-02-16 11:20:58.675149888 +0000 UTC m=+848.304484053 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "tls-secret" (UniqueName: "kubernetes.io/secret/4c4cf393-1f34-415d-bd8a-2cd87dc62593-tls-secret") pod "logging-loki-gateway-7d9d97666-94s4k" (UID: "4c4cf393-1f34-415d-bd8a-2cd87dc62593") : secret "logging-loki-gateway-http" not found
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.176264 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/46c6e5be-0462-40bf-ab5e-f052cb9163b6-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-7d9d97666-nk5qs\" (UID: \"46c6e5be-0462-40bf-ab5e-f052cb9163b6\") " pod="openshift-logging/logging-loki-gateway-7d9d97666-nk5qs"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.176325 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4c4cf393-1f34-415d-bd8a-2cd87dc62593-logging-loki-ca-bundle\") pod \"logging-loki-gateway-7d9d97666-94s4k\" (UID: \"4c4cf393-1f34-415d-bd8a-2cd87dc62593\") " pod="openshift-logging/logging-loki-gateway-7d9d97666-94s4k"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.176981 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/46c6e5be-0462-40bf-ab5e-f052cb9163b6-lokistack-gateway\") pod \"logging-loki-gateway-7d9d97666-nk5qs\" (UID: \"46c6e5be-0462-40bf-ab5e-f052cb9163b6\") " pod="openshift-logging/logging-loki-gateway-7d9d97666-nk5qs"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.177094 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/46c6e5be-0462-40bf-ab5e-f052cb9163b6-rbac\") pod \"logging-loki-gateway-7d9d97666-nk5qs\" (UID: \"46c6e5be-0462-40bf-ab5e-f052cb9163b6\") " pod="openshift-logging/logging-loki-gateway-7d9d97666-nk5qs"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.177663 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/4c4cf393-1f34-415d-bd8a-2cd87dc62593-lokistack-gateway\") pod \"logging-loki-gateway-7d9d97666-94s4k\" (UID: \"4c4cf393-1f34-415d-bd8a-2cd87dc62593\") " pod="openshift-logging/logging-loki-gateway-7d9d97666-94s4k"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.177782 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4c4cf393-1f34-415d-bd8a-2cd87dc62593-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-7d9d97666-94s4k\" (UID: \"4c4cf393-1f34-415d-bd8a-2cd87dc62593\") " pod="openshift-logging/logging-loki-gateway-7d9d97666-94s4k"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.177946 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/46c6e5be-0462-40bf-ab5e-f052cb9163b6-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-7d9d97666-nk5qs\" (UID: \"46c6e5be-0462-40bf-ab5e-f052cb9163b6\") " pod="openshift-logging/logging-loki-gateway-7d9d97666-nk5qs"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.179862 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/46c6e5be-0462-40bf-ab5e-f052cb9163b6-logging-loki-ca-bundle\") pod \"logging-loki-gateway-7d9d97666-nk5qs\" (UID: \"46c6e5be-0462-40bf-ab5e-f052cb9163b6\") " pod="openshift-logging/logging-loki-gateway-7d9d97666-nk5qs"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.180563 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/4c4cf393-1f34-415d-bd8a-2cd87dc62593-rbac\") pod \"logging-loki-gateway-7d9d97666-94s4k\" (UID: \"4c4cf393-1f34-415d-bd8a-2cd87dc62593\") " pod="openshift-logging/logging-loki-gateway-7d9d97666-94s4k"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.185740 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/4c4cf393-1f34-415d-bd8a-2cd87dc62593-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-7d9d97666-94s4k\" (UID: \"4c4cf393-1f34-415d-bd8a-2cd87dc62593\") " pod="openshift-logging/logging-loki-gateway-7d9d97666-94s4k"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.187923 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/46c6e5be-0462-40bf-ab5e-f052cb9163b6-tenants\") pod \"logging-loki-gateway-7d9d97666-nk5qs\" (UID: \"46c6e5be-0462-40bf-ab5e-f052cb9163b6\") " pod="openshift-logging/logging-loki-gateway-7d9d97666-nk5qs"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.190790 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4j5lt\" (UniqueName: \"kubernetes.io/projected/4c4cf393-1f34-415d-bd8a-2cd87dc62593-kube-api-access-4j5lt\") pod \"logging-loki-gateway-7d9d97666-94s4k\" (UID: \"4c4cf393-1f34-415d-bd8a-2cd87dc62593\") " pod="openshift-logging/logging-loki-gateway-7d9d97666-94s4k"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.191130 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gkjmn\" (UniqueName: \"kubernetes.io/projected/46c6e5be-0462-40bf-ab5e-f052cb9163b6-kube-api-access-gkjmn\") pod \"logging-loki-gateway-7d9d97666-nk5qs\" (UID: \"46c6e5be-0462-40bf-ab5e-f052cb9163b6\") " pod="openshift-logging/logging-loki-gateway-7d9d97666-nk5qs"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.192112 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/4c4cf393-1f34-415d-bd8a-2cd87dc62593-tenants\") pod \"logging-loki-gateway-7d9d97666-94s4k\" (UID: \"4c4cf393-1f34-415d-bd8a-2cd87dc62593\") " pod="openshift-logging/logging-loki-gateway-7d9d97666-94s4k"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.362307 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-querier-76bf7b6d45-gcct7"]
Feb 16 11:20:58 crc kubenswrapper[4949]: W0216 11:20:58.362868 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc8fca297_202a_44af_81f7_ecab29bc0472.slice/crio-0ac465cb3dd12a31fe421ca6f1f4d431cc441c3f3524fd601d099f555386e717 WatchSource:0}: Error finding container 0ac465cb3dd12a31fe421ca6f1f4d431cc441c3f3524fd601d099f555386e717: Status 404 returned error can't find the container with id 0ac465cb3dd12a31fe421ca6f1f4d431cc441c3f3524fd601d099f555386e717
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.454857 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-distributor-5d5548c9f5-dl75n"]
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.498276 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-query-frontend-6d6859c548-8h6lw"]
Feb 16 11:20:58 crc kubenswrapper[4949]: W0216 11:20:58.502304 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod27b4f51a_e116_4a53_adec_61ef733835ca.slice/crio-956fb9fb651b4147ea80b517c360214f4895a779de1a3e2aa20eaa63f1b43687 WatchSource:0}: Error finding container 956fb9fb651b4147ea80b517c360214f4895a779de1a3e2aa20eaa63f1b43687: Status 404 returned error can't find the container with id 956fb9fb651b4147ea80b517c360214f4895a779de1a3e2aa20eaa63f1b43687
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.682597 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/46c6e5be-0462-40bf-ab5e-f052cb9163b6-tls-secret\") pod \"logging-loki-gateway-7d9d97666-nk5qs\" (UID: \"46c6e5be-0462-40bf-ab5e-f052cb9163b6\") " pod="openshift-logging/logging-loki-gateway-7d9d97666-nk5qs"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.682657 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/4c4cf393-1f34-415d-bd8a-2cd87dc62593-tls-secret\") pod \"logging-loki-gateway-7d9d97666-94s4k\" (UID: \"4c4cf393-1f34-415d-bd8a-2cd87dc62593\") " pod="openshift-logging/logging-loki-gateway-7d9d97666-94s4k"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.687753 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/4c4cf393-1f34-415d-bd8a-2cd87dc62593-tls-secret\") pod \"logging-loki-gateway-7d9d97666-94s4k\" (UID: \"4c4cf393-1f34-415d-bd8a-2cd87dc62593\") " pod="openshift-logging/logging-loki-gateway-7d9d97666-94s4k"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.689509 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/46c6e5be-0462-40bf-ab5e-f052cb9163b6-tls-secret\") pod \"logging-loki-gateway-7d9d97666-nk5qs\" (UID: \"46c6e5be-0462-40bf-ab5e-f052cb9163b6\") " pod="openshift-logging/logging-loki-gateway-7d9d97666-nk5qs"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.693816 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-querier-76bf7b6d45-gcct7" event={"ID":"c8fca297-202a-44af-81f7-ecab29bc0472","Type":"ContainerStarted","Data":"0ac465cb3dd12a31fe421ca6f1f4d431cc441c3f3524fd601d099f555386e717"}
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.695191 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-query-frontend-6d6859c548-8h6lw" event={"ID":"27b4f51a-e116-4a53-adec-61ef733835ca","Type":"ContainerStarted","Data":"956fb9fb651b4147ea80b517c360214f4895a779de1a3e2aa20eaa63f1b43687"}
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.696026 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-distributor-5d5548c9f5-dl75n" event={"ID":"52cb84ef-5333-451e-aa69-7af33124627b","Type":"ContainerStarted","Data":"a030d4ef8959952469498b8779721b3132e4c13055ada71b0f402b49e749c0f1"}
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.711794 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-ingester-0"]
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.713139 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-ingester-0"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.716614 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-ingester-grpc"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.732618 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-ingester-http"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.780706 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-ingester-0"]
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.831323 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-compactor-0"]
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.832878 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-compactor-0"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.834966 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-compactor-grpc"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.835473 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-compactor-http"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.837153 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-compactor-0"]
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.885072 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ingester-grpc\" (UniqueName: \"kubernetes.io/secret/d60af4cc-ac63-40af-8a3a-2933fb25ffc2-logging-loki-ingester-grpc\") pod \"logging-loki-ingester-0\" (UID: \"d60af4cc-ac63-40af-8a3a-2933fb25ffc2\") " pod="openshift-logging/logging-loki-ingester-0"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.885108 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/d60af4cc-ac63-40af-8a3a-2933fb25ffc2-logging-loki-s3\") pod \"logging-loki-ingester-0\" (UID: \"d60af4cc-ac63-40af-8a3a-2933fb25ffc2\") " pod="openshift-logging/logging-loki-ingester-0"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.885131 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ingester-http\" (UniqueName: \"kubernetes.io/secret/d60af4cc-ac63-40af-8a3a-2933fb25ffc2-logging-loki-ingester-http\") pod \"logging-loki-ingester-0\" (UID: \"d60af4cc-ac63-40af-8a3a-2933fb25ffc2\") " pod="openshift-logging/logging-loki-ingester-0"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.885158 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-b74cc341-f1fb-4e3d-b277-ba199575d28d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b74cc341-f1fb-4e3d-b277-ba199575d28d\") pod \"logging-loki-ingester-0\" (UID: \"d60af4cc-ac63-40af-8a3a-2933fb25ffc2\") " pod="openshift-logging/logging-loki-ingester-0"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.885511 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d60af4cc-ac63-40af-8a3a-2933fb25ffc2-config\") pod \"logging-loki-ingester-0\" (UID: \"d60af4cc-ac63-40af-8a3a-2933fb25ffc2\") " pod="openshift-logging/logging-loki-ingester-0"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.885629 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-679ws\" (UniqueName: \"kubernetes.io/projected/d60af4cc-ac63-40af-8a3a-2933fb25ffc2-kube-api-access-679ws\") pod \"logging-loki-ingester-0\" (UID: \"d60af4cc-ac63-40af-8a3a-2933fb25ffc2\") " pod="openshift-logging/logging-loki-ingester-0"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.885793 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d60af4cc-ac63-40af-8a3a-2933fb25ffc2-logging-loki-ca-bundle\") pod \"logging-loki-ingester-0\" (UID: \"d60af4cc-ac63-40af-8a3a-2933fb25ffc2\") " pod="openshift-logging/logging-loki-ingester-0"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.886096 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-46ba2910-a670-446f-845a-e7070166b24b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-46ba2910-a670-446f-845a-e7070166b24b\") pod \"logging-loki-ingester-0\" (UID: \"d60af4cc-ac63-40af-8a3a-2933fb25ffc2\") " pod="openshift-logging/logging-loki-ingester-0"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.906128 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-index-gateway-0"]
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.907051 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-index-gateway-0"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.909488 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-index-gateway-http"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.917697 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-index-gateway-grpc"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.920474 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-index-gateway-0"]
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.934321 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-gateway-7d9d97666-94s4k"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.988786 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/d60af4cc-ac63-40af-8a3a-2933fb25ffc2-logging-loki-s3\") pod \"logging-loki-ingester-0\" (UID: \"d60af4cc-ac63-40af-8a3a-2933fb25ffc2\") " pod="openshift-logging/logging-loki-ingester-0"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.988856 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a2cbb075-2878-4d94-adf5-c92888ff4c2a-config\") pod \"logging-loki-compactor-0\" (UID: \"a2cbb075-2878-4d94-adf5-c92888ff4c2a\") " pod="openshift-logging/logging-loki-compactor-0"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.988892 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dwvwd\" (UniqueName: \"kubernetes.io/projected/a2cbb075-2878-4d94-adf5-c92888ff4c2a-kube-api-access-dwvwd\") pod \"logging-loki-compactor-0\" (UID: \"a2cbb075-2878-4d94-adf5-c92888ff4c2a\") " pod="openshift-logging/logging-loki-compactor-0"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.988921 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/05030bcd-7441-4c52-b653-e1f112e5d7ff-logging-loki-ca-bundle\") pod \"logging-loki-index-gateway-0\" (UID: \"05030bcd-7441-4c52-b653-e1f112e5d7ff\") " pod="openshift-logging/logging-loki-index-gateway-0"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.988954 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-679ws\" (UniqueName: \"kubernetes.io/projected/d60af4cc-ac63-40af-8a3a-2933fb25ffc2-kube-api-access-679ws\") pod \"logging-loki-ingester-0\" (UID: \"d60af4cc-ac63-40af-8a3a-2933fb25ffc2\") " pod="openshift-logging/logging-loki-ingester-0"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.988978 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-compactor-grpc\" (UniqueName: \"kubernetes.io/secret/a2cbb075-2878-4d94-adf5-c92888ff4c2a-logging-loki-compactor-grpc\") pod \"logging-loki-compactor-0\" (UID: \"a2cbb075-2878-4d94-adf5-c92888ff4c2a\") " pod="openshift-logging/logging-loki-compactor-0"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.988999 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/05030bcd-7441-4c52-b653-e1f112e5d7ff-config\") pod \"logging-loki-index-gateway-0\" (UID: \"05030bcd-7441-4c52-b653-e1f112e5d7ff\") " pod="openshift-logging/logging-loki-index-gateway-0"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.989029 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-034e514c-a2c1-4e51-bb76-c168293f6bed\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-034e514c-a2c1-4e51-bb76-c168293f6bed\") pod \"logging-loki-index-gateway-0\" (UID: \"05030bcd-7441-4c52-b653-e1f112e5d7ff\") " pod="openshift-logging/logging-loki-index-gateway-0"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.989072 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ingester-grpc\" (UniqueName: \"kubernetes.io/secret/d60af4cc-ac63-40af-8a3a-2933fb25ffc2-logging-loki-ingester-grpc\") pod \"logging-loki-ingester-0\" (UID: \"d60af4cc-ac63-40af-8a3a-2933fb25ffc2\") " pod="openshift-logging/logging-loki-ingester-0"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.989093 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ingester-http\" (UniqueName: \"kubernetes.io/secret/d60af4cc-ac63-40af-8a3a-2933fb25ffc2-logging-loki-ingester-http\") pod \"logging-loki-ingester-0\" (UID: \"d60af4cc-ac63-40af-8a3a-2933fb25ffc2\") " pod="openshift-logging/logging-loki-ingester-0"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.989116 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-b74cc341-f1fb-4e3d-b277-ba199575d28d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b74cc341-f1fb-4e3d-b277-ba199575d28d\") pod \"logging-loki-ingester-0\" (UID: \"d60af4cc-ac63-40af-8a3a-2933fb25ffc2\") " pod="openshift-logging/logging-loki-ingester-0"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.989150 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d60af4cc-ac63-40af-8a3a-2933fb25ffc2-config\") pod \"logging-loki-ingester-0\" (UID: \"d60af4cc-ac63-40af-8a3a-2933fb25ffc2\") " pod="openshift-logging/logging-loki-ingester-0"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.989242 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-compactor-http\" (UniqueName: \"kubernetes.io/secret/a2cbb075-2878-4d94-adf5-c92888ff4c2a-logging-loki-compactor-http\") pod \"logging-loki-compactor-0\" (UID: \"a2cbb075-2878-4d94-adf5-c92888ff4c2a\") " pod="openshift-logging/logging-loki-compactor-0"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.989266 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-5ee29278-3930-463b-807c-a0a35e351eab\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5ee29278-3930-463b-807c-a0a35e351eab\") pod \"logging-loki-compactor-0\" (UID: \"a2cbb075-2878-4d94-adf5-c92888ff4c2a\") " pod="openshift-logging/logging-loki-compactor-0"
Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.989915 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-gateway-7d9d97666-nk5qs" Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.990717 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/a2cbb075-2878-4d94-adf5-c92888ff4c2a-logging-loki-s3\") pod \"logging-loki-compactor-0\" (UID: \"a2cbb075-2878-4d94-adf5-c92888ff4c2a\") " pod="openshift-logging/logging-loki-compactor-0" Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.990804 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-index-gateway-http\" (UniqueName: \"kubernetes.io/secret/05030bcd-7441-4c52-b653-e1f112e5d7ff-logging-loki-index-gateway-http\") pod \"logging-loki-index-gateway-0\" (UID: \"05030bcd-7441-4c52-b653-e1f112e5d7ff\") " pod="openshift-logging/logging-loki-index-gateway-0" Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.990829 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jzzlr\" (UniqueName: \"kubernetes.io/projected/05030bcd-7441-4c52-b653-e1f112e5d7ff-kube-api-access-jzzlr\") pod \"logging-loki-index-gateway-0\" (UID: \"05030bcd-7441-4c52-b653-e1f112e5d7ff\") " pod="openshift-logging/logging-loki-index-gateway-0" Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.990853 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-index-gateway-grpc\" (UniqueName: \"kubernetes.io/secret/05030bcd-7441-4c52-b653-e1f112e5d7ff-logging-loki-index-gateway-grpc\") pod \"logging-loki-index-gateway-0\" (UID: \"05030bcd-7441-4c52-b653-e1f112e5d7ff\") " pod="openshift-logging/logging-loki-index-gateway-0" Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.990878 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d60af4cc-ac63-40af-8a3a-2933fb25ffc2-logging-loki-ca-bundle\") pod \"logging-loki-ingester-0\" (UID: \"d60af4cc-ac63-40af-8a3a-2933fb25ffc2\") " pod="openshift-logging/logging-loki-ingester-0" Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.990904 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/05030bcd-7441-4c52-b653-e1f112e5d7ff-logging-loki-s3\") pod \"logging-loki-index-gateway-0\" (UID: \"05030bcd-7441-4c52-b653-e1f112e5d7ff\") " pod="openshift-logging/logging-loki-index-gateway-0" Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.990928 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-46ba2910-a670-446f-845a-e7070166b24b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-46ba2910-a670-446f-845a-e7070166b24b\") pod \"logging-loki-ingester-0\" (UID: \"d60af4cc-ac63-40af-8a3a-2933fb25ffc2\") " pod="openshift-logging/logging-loki-ingester-0" Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.990956 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a2cbb075-2878-4d94-adf5-c92888ff4c2a-logging-loki-ca-bundle\") pod \"logging-loki-compactor-0\" (UID: \"a2cbb075-2878-4d94-adf5-c92888ff4c2a\") " pod="openshift-logging/logging-loki-compactor-0" Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.991799 
4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d60af4cc-ac63-40af-8a3a-2933fb25ffc2-logging-loki-ca-bundle\") pod \"logging-loki-ingester-0\" (UID: \"d60af4cc-ac63-40af-8a3a-2933fb25ffc2\") " pod="openshift-logging/logging-loki-ingester-0" Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.993277 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d60af4cc-ac63-40af-8a3a-2933fb25ffc2-config\") pod \"logging-loki-ingester-0\" (UID: \"d60af4cc-ac63-40af-8a3a-2933fb25ffc2\") " pod="openshift-logging/logging-loki-ingester-0" Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.995280 4949 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.995322 4949 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-b74cc341-f1fb-4e3d-b277-ba199575d28d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b74cc341-f1fb-4e3d-b277-ba199575d28d\") pod \"logging-loki-ingester-0\" (UID: \"d60af4cc-ac63-40af-8a3a-2933fb25ffc2\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/878fbe9c955496f3e5a8e0c143bc7437107cc672180130711bc017da55da3851/globalmount\"" pod="openshift-logging/logging-loki-ingester-0" Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.996473 4949 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.996529 4949 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-46ba2910-a670-446f-845a-e7070166b24b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-46ba2910-a670-446f-845a-e7070166b24b\") pod \"logging-loki-ingester-0\" (UID: \"d60af4cc-ac63-40af-8a3a-2933fb25ffc2\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/3f238d30940d9c5fabdfd4c6dffeeaced4c29de0eb8a2c5f4ed9146bca3c38f7/globalmount\"" pod="openshift-logging/logging-loki-ingester-0" Feb 16 11:20:58 crc kubenswrapper[4949]: I0216 11:20:58.997766 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/d60af4cc-ac63-40af-8a3a-2933fb25ffc2-logging-loki-s3\") pod \"logging-loki-ingester-0\" (UID: \"d60af4cc-ac63-40af-8a3a-2933fb25ffc2\") " pod="openshift-logging/logging-loki-ingester-0" Feb 16 11:20:59 crc kubenswrapper[4949]: I0216 11:20:59.000783 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ingester-grpc\" (UniqueName: \"kubernetes.io/secret/d60af4cc-ac63-40af-8a3a-2933fb25ffc2-logging-loki-ingester-grpc\") pod \"logging-loki-ingester-0\" (UID: \"d60af4cc-ac63-40af-8a3a-2933fb25ffc2\") " pod="openshift-logging/logging-loki-ingester-0" Feb 16 11:20:59 crc kubenswrapper[4949]: I0216 11:20:59.003794 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ingester-http\" (UniqueName: \"kubernetes.io/secret/d60af4cc-ac63-40af-8a3a-2933fb25ffc2-logging-loki-ingester-http\") pod \"logging-loki-ingester-0\" (UID: \"d60af4cc-ac63-40af-8a3a-2933fb25ffc2\") " pod="openshift-logging/logging-loki-ingester-0"
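The two csi_attacher.go:380 records above are informational, not errors: kubevirt.io.hostpath-provisioner does not advertise the CSI STAGE_UNSTAGE_VOLUME node capability, so the kubelet skips the node-level NodeStageVolume step and immediately reports MountVolume.MountDevice as succeeded, leaving only the per-pod publish (the SetUp records). A minimal Go sketch of that decision; mountDevice, supportsStageUnstage, and stageVolume are invented names, not the kubelet's actual symbols:

package main

// mountDevice sketches the staging decision behind the csi_attacher
// records above. supportsStageUnstage mirrors the driver's advertised
// STAGE_UNSTAGE_VOLUME node capability; stageVolume stands in for the
// NodeStageVolume RPC.
func mountDevice(supportsStageUnstage bool, stageVolume func() error) error {
	if !supportsStageUnstage {
		// "STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice..."
		return nil // MountDevice is reported as succeeded immediately
	}
	return stageVolume() // mount the device once at the global mount path
}

Feb 16 11:20:59 crc kubenswrapper[4949]: I0216 11:20:59.020414 4949 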
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-679ws\" (UniqueName: \"kubernetes.io/projected/d60af4cc-ac63-40af-8a3a-2933fb25ffc2-kube-api-access-679ws\") pod \"logging-loki-ingester-0\" (UID: \"d60af4cc-ac63-40af-8a3a-2933fb25ffc2\") " pod="openshift-logging/logging-loki-ingester-0" Feb 16 11:20:59 crc kubenswrapper[4949]: I0216 11:20:59.041748 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-b74cc341-f1fb-4e3d-b277-ba199575d28d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b74cc341-f1fb-4e3d-b277-ba199575d28d\") pod \"logging-loki-ingester-0\" (UID: \"d60af4cc-ac63-40af-8a3a-2933fb25ffc2\") " pod="openshift-logging/logging-loki-ingester-0" Feb 16 11:20:59 crc kubenswrapper[4949]: I0216 11:20:59.045700 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-46ba2910-a670-446f-845a-e7070166b24b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-46ba2910-a670-446f-845a-e7070166b24b\") pod \"logging-loki-ingester-0\" (UID: \"d60af4cc-ac63-40af-8a3a-2933fb25ffc2\") " pod="openshift-logging/logging-loki-ingester-0" Feb 16 11:20:59 crc kubenswrapper[4949]: I0216 11:20:59.094461 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-compactor-http\" (UniqueName: \"kubernetes.io/secret/a2cbb075-2878-4d94-adf5-c92888ff4c2a-logging-loki-compactor-http\") pod \"logging-loki-compactor-0\" (UID: \"a2cbb075-2878-4d94-adf5-c92888ff4c2a\") " pod="openshift-logging/logging-loki-compactor-0" Feb 16 11:20:59 crc kubenswrapper[4949]: I0216 11:20:59.094531 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-5ee29278-3930-463b-807c-a0a35e351eab\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5ee29278-3930-463b-807c-a0a35e351eab\") pod \"logging-loki-compactor-0\" (UID: \"a2cbb075-2878-4d94-adf5-c92888ff4c2a\") " pod="openshift-logging/logging-loki-compactor-0" Feb 16 11:20:59 crc kubenswrapper[4949]: I0216 11:20:59.094561 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/a2cbb075-2878-4d94-adf5-c92888ff4c2a-logging-loki-s3\") pod \"logging-loki-compactor-0\" (UID: \"a2cbb075-2878-4d94-adf5-c92888ff4c2a\") " pod="openshift-logging/logging-loki-compactor-0" Feb 16 11:20:59 crc kubenswrapper[4949]: I0216 11:20:59.094592 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-index-gateway-http\" (UniqueName: \"kubernetes.io/secret/05030bcd-7441-4c52-b653-e1f112e5d7ff-logging-loki-index-gateway-http\") pod \"logging-loki-index-gateway-0\" (UID: \"05030bcd-7441-4c52-b653-e1f112e5d7ff\") " pod="openshift-logging/logging-loki-index-gateway-0" Feb 16 11:20:59 crc kubenswrapper[4949]: I0216 11:20:59.094616 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jzzlr\" (UniqueName: \"kubernetes.io/projected/05030bcd-7441-4c52-b653-e1f112e5d7ff-kube-api-access-jzzlr\") pod \"logging-loki-index-gateway-0\" (UID: \"05030bcd-7441-4c52-b653-e1f112e5d7ff\") " pod="openshift-logging/logging-loki-index-gateway-0" Feb 16 11:20:59 crc kubenswrapper[4949]: I0216 11:20:59.094639 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-index-gateway-grpc\" (UniqueName: \"kubernetes.io/secret/05030bcd-7441-4c52-b653-e1f112e5d7ff-logging-loki-index-gateway-grpc\") pod 
\"logging-loki-index-gateway-0\" (UID: \"05030bcd-7441-4c52-b653-e1f112e5d7ff\") " pod="openshift-logging/logging-loki-index-gateway-0" Feb 16 11:20:59 crc kubenswrapper[4949]: I0216 11:20:59.094666 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/05030bcd-7441-4c52-b653-e1f112e5d7ff-logging-loki-s3\") pod \"logging-loki-index-gateway-0\" (UID: \"05030bcd-7441-4c52-b653-e1f112e5d7ff\") " pod="openshift-logging/logging-loki-index-gateway-0" Feb 16 11:20:59 crc kubenswrapper[4949]: I0216 11:20:59.094704 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a2cbb075-2878-4d94-adf5-c92888ff4c2a-logging-loki-ca-bundle\") pod \"logging-loki-compactor-0\" (UID: \"a2cbb075-2878-4d94-adf5-c92888ff4c2a\") " pod="openshift-logging/logging-loki-compactor-0" Feb 16 11:20:59 crc kubenswrapper[4949]: I0216 11:20:59.094740 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a2cbb075-2878-4d94-adf5-c92888ff4c2a-config\") pod \"logging-loki-compactor-0\" (UID: \"a2cbb075-2878-4d94-adf5-c92888ff4c2a\") " pod="openshift-logging/logging-loki-compactor-0" Feb 16 11:20:59 crc kubenswrapper[4949]: I0216 11:20:59.095516 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dwvwd\" (UniqueName: \"kubernetes.io/projected/a2cbb075-2878-4d94-adf5-c92888ff4c2a-kube-api-access-dwvwd\") pod \"logging-loki-compactor-0\" (UID: \"a2cbb075-2878-4d94-adf5-c92888ff4c2a\") " pod="openshift-logging/logging-loki-compactor-0" Feb 16 11:20:59 crc kubenswrapper[4949]: I0216 11:20:59.095559 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/05030bcd-7441-4c52-b653-e1f112e5d7ff-logging-loki-ca-bundle\") pod \"logging-loki-index-gateway-0\" (UID: \"05030bcd-7441-4c52-b653-e1f112e5d7ff\") " pod="openshift-logging/logging-loki-index-gateway-0" Feb 16 11:20:59 crc kubenswrapper[4949]: I0216 11:20:59.095593 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-compactor-grpc\" (UniqueName: \"kubernetes.io/secret/a2cbb075-2878-4d94-adf5-c92888ff4c2a-logging-loki-compactor-grpc\") pod \"logging-loki-compactor-0\" (UID: \"a2cbb075-2878-4d94-adf5-c92888ff4c2a\") " pod="openshift-logging/logging-loki-compactor-0" Feb 16 11:20:59 crc kubenswrapper[4949]: I0216 11:20:59.095611 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/05030bcd-7441-4c52-b653-e1f112e5d7ff-config\") pod \"logging-loki-index-gateway-0\" (UID: \"05030bcd-7441-4c52-b653-e1f112e5d7ff\") " pod="openshift-logging/logging-loki-index-gateway-0" Feb 16 11:20:59 crc kubenswrapper[4949]: I0216 11:20:59.095641 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-034e514c-a2c1-4e51-bb76-c168293f6bed\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-034e514c-a2c1-4e51-bb76-c168293f6bed\") pod \"logging-loki-index-gateway-0\" (UID: \"05030bcd-7441-4c52-b653-e1f112e5d7ff\") " pod="openshift-logging/logging-loki-index-gateway-0" Feb 16 11:20:59 crc kubenswrapper[4949]: I0216 11:20:59.099827 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/05030bcd-7441-4c52-b653-e1f112e5d7ff-logging-loki-ca-bundle\") pod \"logging-loki-index-gateway-0\" (UID: \"05030bcd-7441-4c52-b653-e1f112e5d7ff\") " pod="openshift-logging/logging-loki-index-gateway-0" Feb 16 11:20:59 crc kubenswrapper[4949]: I0216 11:20:59.100646 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a2cbb075-2878-4d94-adf5-c92888ff4c2a-config\") pod \"logging-loki-compactor-0\" (UID: \"a2cbb075-2878-4d94-adf5-c92888ff4c2a\") " pod="openshift-logging/logging-loki-compactor-0" Feb 16 11:20:59 crc kubenswrapper[4949]: I0216 11:20:59.101475 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a2cbb075-2878-4d94-adf5-c92888ff4c2a-logging-loki-ca-bundle\") pod \"logging-loki-compactor-0\" (UID: \"a2cbb075-2878-4d94-adf5-c92888ff4c2a\") " pod="openshift-logging/logging-loki-compactor-0" Feb 16 11:20:59 crc kubenswrapper[4949]: I0216 11:20:59.101610 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/05030bcd-7441-4c52-b653-e1f112e5d7ff-config\") pod \"logging-loki-index-gateway-0\" (UID: \"05030bcd-7441-4c52-b653-e1f112e5d7ff\") " pod="openshift-logging/logging-loki-index-gateway-0" Feb 16 11:20:59 crc kubenswrapper[4949]: I0216 11:20:59.107962 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-index-gateway-http\" (UniqueName: \"kubernetes.io/secret/05030bcd-7441-4c52-b653-e1f112e5d7ff-logging-loki-index-gateway-http\") pod \"logging-loki-index-gateway-0\" (UID: \"05030bcd-7441-4c52-b653-e1f112e5d7ff\") " pod="openshift-logging/logging-loki-index-gateway-0" Feb 16 11:20:59 crc kubenswrapper[4949]: I0216 11:20:59.108927 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-index-gateway-grpc\" (UniqueName: \"kubernetes.io/secret/05030bcd-7441-4c52-b653-e1f112e5d7ff-logging-loki-index-gateway-grpc\") pod \"logging-loki-index-gateway-0\" (UID: \"05030bcd-7441-4c52-b653-e1f112e5d7ff\") " pod="openshift-logging/logging-loki-index-gateway-0" Feb 16 11:20:59 crc kubenswrapper[4949]: I0216 11:20:59.109150 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/05030bcd-7441-4c52-b653-e1f112e5d7ff-logging-loki-s3\") pod \"logging-loki-index-gateway-0\" (UID: \"05030bcd-7441-4c52-b653-e1f112e5d7ff\") " pod="openshift-logging/logging-loki-index-gateway-0" Feb 16 11:20:59 crc kubenswrapper[4949]: I0216 11:20:59.114460 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-compactor-http\" (UniqueName: \"kubernetes.io/secret/a2cbb075-2878-4d94-adf5-c92888ff4c2a-logging-loki-compactor-http\") pod \"logging-loki-compactor-0\" (UID: \"a2cbb075-2878-4d94-adf5-c92888ff4c2a\") " pod="openshift-logging/logging-loki-compactor-0" Feb 16 11:20:59 crc kubenswrapper[4949]: I0216 11:20:59.116018 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-compactor-grpc\" (UniqueName: \"kubernetes.io/secret/a2cbb075-2878-4d94-adf5-c92888ff4c2a-logging-loki-compactor-grpc\") pod \"logging-loki-compactor-0\" (UID: \"a2cbb075-2878-4d94-adf5-c92888ff4c2a\") " pod="openshift-logging/logging-loki-compactor-0" Feb 16 11:20:59 crc kubenswrapper[4949]: I0216 11:20:59.124204 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/a2cbb075-2878-4d94-adf5-c92888ff4c2a-logging-loki-s3\") pod \"logging-loki-compactor-0\" (UID: \"a2cbb075-2878-4d94-adf5-c92888ff4c2a\") " pod="openshift-logging/logging-loki-compactor-0" Feb 16 11:20:59 crc kubenswrapper[4949]: I0216 11:20:59.142288 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dwvwd\" (UniqueName: \"kubernetes.io/projected/a2cbb075-2878-4d94-adf5-c92888ff4c2a-kube-api-access-dwvwd\") pod \"logging-loki-compactor-0\" (UID: \"a2cbb075-2878-4d94-adf5-c92888ff4c2a\") " pod="openshift-logging/logging-loki-compactor-0" Feb 16 11:20:59 crc kubenswrapper[4949]: I0216 11:20:59.146812 4949 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Feb 16 11:20:59 crc kubenswrapper[4949]: I0216 11:20:59.146890 4949 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-5ee29278-3930-463b-807c-a0a35e351eab\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5ee29278-3930-463b-807c-a0a35e351eab\") pod \"logging-loki-compactor-0\" (UID: \"a2cbb075-2878-4d94-adf5-c92888ff4c2a\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/b57baec3709f7b4e2637ba5e29031c65e146ca297c5b56cd8f18b444e691a27b/globalmount\"" pod="openshift-logging/logging-loki-compactor-0" Feb 16 11:20:59 crc kubenswrapper[4949]: I0216 11:20:59.147626 4949 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Feb 16 11:20:59 crc kubenswrapper[4949]: I0216 11:20:59.147651 4949 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-034e514c-a2c1-4e51-bb76-c168293f6bed\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-034e514c-a2c1-4e51-bb76-c168293f6bed\") pod \"logging-loki-index-gateway-0\" (UID: \"05030bcd-7441-4c52-b653-e1f112e5d7ff\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/6fcde9c4708b2a17ca658b93a146cf7ccf91eb14257c344c88222882ee437775/globalmount\"" pod="openshift-logging/logging-loki-index-gateway-0" Feb 16 11:20:59 crc kubenswrapper[4949]: I0216 11:20:59.151052 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jzzlr\" (UniqueName: \"kubernetes.io/projected/05030bcd-7441-4c52-b653-e1f112e5d7ff-kube-api-access-jzzlr\") pod \"logging-loki-index-gateway-0\" (UID: \"05030bcd-7441-4c52-b653-e1f112e5d7ff\") " pod="openshift-logging/logging-loki-index-gateway-0" Feb 16 11:20:59 crc kubenswrapper[4949]: I0216 11:20:59.302511 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-5ee29278-3930-463b-807c-a0a35e351eab\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5ee29278-3930-463b-807c-a0a35e351eab\") pod \"logging-loki-compactor-0\" (UID: \"a2cbb075-2878-4d94-adf5-c92888ff4c2a\") " pod="openshift-logging/logging-loki-compactor-0" Feb 16 11:20:59 crc kubenswrapper[4949]: I0216 11:20:59.305425 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-034e514c-a2c1-4e51-bb76-c168293f6bed\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-034e514c-a2c1-4e51-bb76-c168293f6bed\") pod \"logging-loki-index-gateway-0\" (UID: \"05030bcd-7441-4c52-b653-e1f112e5d7ff\") " pod="openshift-logging/logging-loki-index-gateway-0" Feb 16 11:20:59 crc kubenswrapper[4949]: I0216 
11:20:59.344401 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-ingester-0" Feb 16 11:20:59 crc kubenswrapper[4949]: I0216 11:20:59.397610 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-7d9d97666-94s4k"] Feb 16 11:20:59 crc kubenswrapper[4949]: W0216 11:20:59.414890 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4c4cf393_1f34_415d_bd8a_2cd87dc62593.slice/crio-8059bc135ab7e4584652924551432856d2cfea363d0bd5e0aba57a12557ed326 WatchSource:0}: Error finding container 8059bc135ab7e4584652924551432856d2cfea363d0bd5e0aba57a12557ed326: Status 404 returned error can't find the container with id 8059bc135ab7e4584652924551432856d2cfea363d0bd5e0aba57a12557ed326 Feb 16 11:20:59 crc kubenswrapper[4949]: I0216 11:20:59.451393 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-compactor-0" Feb 16 11:20:59 crc kubenswrapper[4949]: I0216 11:20:59.534350 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-index-gateway-0" Feb 16 11:20:59 crc kubenswrapper[4949]: I0216 11:20:59.720397 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-7d9d97666-nk5qs"] Feb 16 11:20:59 crc kubenswrapper[4949]: I0216 11:20:59.724858 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-7d9d97666-94s4k" event={"ID":"4c4cf393-1f34-415d-bd8a-2cd87dc62593","Type":"ContainerStarted","Data":"8059bc135ab7e4584652924551432856d2cfea363d0bd5e0aba57a12557ed326"} Feb 16 11:20:59 crc kubenswrapper[4949]: W0216 11:20:59.737199 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod46c6e5be_0462_40bf_ab5e_f052cb9163b6.slice/crio-de927757177829d372989d2597e4b8e4e05083c04daa62b446c8225929e8e890 WatchSource:0}: Error finding container de927757177829d372989d2597e4b8e4e05083c04daa62b446c8225929e8e890: Status 404 returned error can't find the container with id de927757177829d372989d2597e4b8e4e05083c04daa62b446c8225929e8e890 Feb 16 11:20:59 crc kubenswrapper[4949]: I0216 11:20:59.831230 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-ingester-0"] Feb 16 11:20:59 crc kubenswrapper[4949]: W0216 11:20:59.833974 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd60af4cc_ac63_40af_8a3a_2933fb25ffc2.slice/crio-76afdde931d76d0f51f982d3190f70b55947824266b1c7bebc6ec53094d1c07d WatchSource:0}: Error finding container 76afdde931d76d0f51f982d3190f70b55947824266b1c7bebc6ec53094d1c07d: Status 404 returned error can't find the container with id 76afdde931d76d0f51f982d3190f70b55947824266b1c7bebc6ec53094d1c07d Feb 16 11:20:59 crc kubenswrapper[4949]: I0216 11:20:59.912905 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-compactor-0"] Feb 16 11:20:59 crc kubenswrapper[4949]: I0216 11:20:59.985443 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-index-gateway-0"] Feb 16 11:21:00 crc kubenswrapper[4949]: W0216 11:21:00.004898 4949 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod05030bcd_7441_4c52_b653_e1f112e5d7ff.slice/crio-ed23bb67a010f75a08fcca9ab2c09eda808d69aa947292345bce5d7415455507 WatchSource:0}: Error finding container ed23bb67a010f75a08fcca9ab2c09eda808d69aa947292345bce5d7415455507: Status 404 returned error can't find the container with id ed23bb67a010f75a08fcca9ab2c09eda808d69aa947292345bce5d7415455507 Feb 16 11:21:00 crc kubenswrapper[4949]: I0216 11:21:00.734274 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-index-gateway-0" event={"ID":"05030bcd-7441-4c52-b653-e1f112e5d7ff","Type":"ContainerStarted","Data":"ed23bb67a010f75a08fcca9ab2c09eda808d69aa947292345bce5d7415455507"} Feb 16 11:21:00 crc kubenswrapper[4949]: I0216 11:21:00.735986 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-compactor-0" event={"ID":"a2cbb075-2878-4d94-adf5-c92888ff4c2a","Type":"ContainerStarted","Data":"387186511aa942fbaf5709e48d63778d114bdaf09c552cc5d581101f53d2b7cb"} Feb 16 11:21:00 crc kubenswrapper[4949]: I0216 11:21:00.737338 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-7d9d97666-nk5qs" event={"ID":"46c6e5be-0462-40bf-ab5e-f052cb9163b6","Type":"ContainerStarted","Data":"de927757177829d372989d2597e4b8e4e05083c04daa62b446c8225929e8e890"} Feb 16 11:21:00 crc kubenswrapper[4949]: I0216 11:21:00.738878 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-ingester-0" event={"ID":"d60af4cc-ac63-40af-8a3a-2933fb25ffc2","Type":"ContainerStarted","Data":"76afdde931d76d0f51f982d3190f70b55947824266b1c7bebc6ec53094d1c07d"} Feb 16 11:21:04 crc kubenswrapper[4949]: I0216 11:21:04.773497 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-distributor-5d5548c9f5-dl75n" event={"ID":"52cb84ef-5333-451e-aa69-7af33124627b","Type":"ContainerStarted","Data":"8922a5553c44a5f84d8f18eaf057991d36ad22344634ed30a63e709266c6c2b1"} Feb 16 11:21:04 crc kubenswrapper[4949]: I0216 11:21:04.774210 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-distributor-5d5548c9f5-dl75n" Feb 16 11:21:04 crc kubenswrapper[4949]: I0216 11:21:04.775829 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-7d9d97666-94s4k" event={"ID":"4c4cf393-1f34-415d-bd8a-2cd87dc62593","Type":"ContainerStarted","Data":"9aa963dafe4ecb020dd76036e9730219c3b16bc62834b1fa5a260f7dd0993df2"} Feb 16 11:21:04 crc kubenswrapper[4949]: I0216 11:21:04.778422 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-7d9d97666-nk5qs" event={"ID":"46c6e5be-0462-40bf-ab5e-f052cb9163b6","Type":"ContainerStarted","Data":"142353e4ef22bc1e4e2d208a98dc4161681bf59631b644136048502c6d76a432"} Feb 16 11:21:04 crc kubenswrapper[4949]: I0216 11:21:04.780380 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-ingester-0" event={"ID":"d60af4cc-ac63-40af-8a3a-2933fb25ffc2","Type":"ContainerStarted","Data":"df5f14f775eecf9a337fc6522b2de09ad5dcbf0f7932fdeca93d6cc2de699a34"} Feb 16 11:21:04 crc kubenswrapper[4949]: I0216 11:21:04.780520 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-ingester-0" Feb 16 11:21:04 crc kubenswrapper[4949]: I0216 11:21:04.782188 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-logging/logging-loki-index-gateway-0" event={"ID":"05030bcd-7441-4c52-b653-e1f112e5d7ff","Type":"ContainerStarted","Data":"c9033262dfaf5eb2b99c5fa02247c6cee9a5bcd7e5daa76257575eedefa58282"} Feb 16 11:21:04 crc kubenswrapper[4949]: I0216 11:21:04.782272 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-index-gateway-0" Feb 16 11:21:04 crc kubenswrapper[4949]: I0216 11:21:04.783332 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-compactor-0" event={"ID":"a2cbb075-2878-4d94-adf5-c92888ff4c2a","Type":"ContainerStarted","Data":"c091776932396c1eacdfd85724e781380402fd5837590581dd74167448005e1d"} Feb 16 11:21:04 crc kubenswrapper[4949]: I0216 11:21:04.783610 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-compactor-0" Feb 16 11:21:04 crc kubenswrapper[4949]: I0216 11:21:04.784470 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-querier-76bf7b6d45-gcct7" event={"ID":"c8fca297-202a-44af-81f7-ecab29bc0472","Type":"ContainerStarted","Data":"d46938ccc58bbc24dedabfd74eb3c42d8cfe4ccd6e33a625e315318c03c16190"} Feb 16 11:21:04 crc kubenswrapper[4949]: I0216 11:21:04.784549 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-querier-76bf7b6d45-gcct7" Feb 16 11:21:04 crc kubenswrapper[4949]: I0216 11:21:04.786747 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-query-frontend-6d6859c548-8h6lw" event={"ID":"27b4f51a-e116-4a53-adec-61ef733835ca","Type":"ContainerStarted","Data":"7626a5c836d6ccaa646ea2d1a91846e570e1a1657fa3815f42e022dbd230e46b"} Feb 16 11:21:04 crc kubenswrapper[4949]: I0216 11:21:04.787207 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-query-frontend-6d6859c548-8h6lw" Feb 16 11:21:04 crc kubenswrapper[4949]: I0216 11:21:04.796155 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-distributor-5d5548c9f5-dl75n" podStartSLOduration=2.442178905 podStartE2EDuration="7.79613219s" podCreationTimestamp="2026-02-16 11:20:57 +0000 UTC" firstStartedPulling="2026-02-16 11:20:58.469936838 +0000 UTC m=+848.099271003" lastFinishedPulling="2026-02-16 11:21:03.823890133 +0000 UTC m=+853.453224288" observedRunningTime="2026-02-16 11:21:04.791743066 +0000 UTC m=+854.421077241" watchObservedRunningTime="2026-02-16 11:21:04.79613219 +0000 UTC m=+854.425466355" Feb 16 11:21:04 crc kubenswrapper[4949]: I0216 11:21:04.815434 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-query-frontend-6d6859c548-8h6lw" podStartSLOduration=2.514231264 podStartE2EDuration="7.815411976s" podCreationTimestamp="2026-02-16 11:20:57 +0000 UTC" firstStartedPulling="2026-02-16 11:20:58.50464916 +0000 UTC m=+848.133983325" lastFinishedPulling="2026-02-16 11:21:03.805829872 +0000 UTC m=+853.435164037" observedRunningTime="2026-02-16 11:21:04.812840543 +0000 UTC m=+854.442174708" watchObservedRunningTime="2026-02-16 11:21:04.815411976 +0000 UTC m=+854.444746141" Feb 16 11:21:04 crc kubenswrapper[4949]: I0216 11:21:04.849018 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-querier-76bf7b6d45-gcct7" podStartSLOduration=2.377461734 podStartE2EDuration="7.849002116s" podCreationTimestamp="2026-02-16 11:20:57 +0000 
UTC" firstStartedPulling="2026-02-16 11:20:58.368658863 +0000 UTC m=+847.997993028" lastFinishedPulling="2026-02-16 11:21:03.840199245 +0000 UTC m=+853.469533410" observedRunningTime="2026-02-16 11:21:04.842962145 +0000 UTC m=+854.472296310" watchObservedRunningTime="2026-02-16 11:21:04.849002116 +0000 UTC m=+854.478336281" Feb 16 11:21:04 crc kubenswrapper[4949]: I0216 11:21:04.867316 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-compactor-0" podStartSLOduration=3.961039988 podStartE2EDuration="7.867300024s" podCreationTimestamp="2026-02-16 11:20:57 +0000 UTC" firstStartedPulling="2026-02-16 11:20:59.934044501 +0000 UTC m=+849.563378666" lastFinishedPulling="2026-02-16 11:21:03.840304537 +0000 UTC m=+853.469638702" observedRunningTime="2026-02-16 11:21:04.866157011 +0000 UTC m=+854.495491176" watchObservedRunningTime="2026-02-16 11:21:04.867300024 +0000 UTC m=+854.496634179" Feb 16 11:21:04 crc kubenswrapper[4949]: I0216 11:21:04.898528 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-index-gateway-0" podStartSLOduration=4.031934004 podStartE2EDuration="7.898504487s" podCreationTimestamp="2026-02-16 11:20:57 +0000 UTC" firstStartedPulling="2026-02-16 11:21:00.00822924 +0000 UTC m=+849.637563405" lastFinishedPulling="2026-02-16 11:21:03.874799723 +0000 UTC m=+853.504133888" observedRunningTime="2026-02-16 11:21:04.892675512 +0000 UTC m=+854.522009707" watchObservedRunningTime="2026-02-16 11:21:04.898504487 +0000 UTC m=+854.527838692" Feb 16 11:21:04 crc kubenswrapper[4949]: I0216 11:21:04.922413 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-ingester-0" podStartSLOduration=3.888004691 podStartE2EDuration="7.922396332s" podCreationTimestamp="2026-02-16 11:20:57 +0000 UTC" firstStartedPulling="2026-02-16 11:20:59.837998144 +0000 UTC m=+849.467332309" lastFinishedPulling="2026-02-16 11:21:03.872389775 +0000 UTC m=+853.501723950" observedRunningTime="2026-02-16 11:21:04.919904562 +0000 UTC m=+854.549238757" watchObservedRunningTime="2026-02-16 11:21:04.922396332 +0000 UTC m=+854.551730497" Feb 16 11:21:08 crc kubenswrapper[4949]: I0216 11:21:08.843968 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-7d9d97666-94s4k" event={"ID":"4c4cf393-1f34-415d-bd8a-2cd87dc62593","Type":"ContainerStarted","Data":"db83ce0dd541f5f46e58dbd15013f8bd34ced77e259f97b7bbb6cd0921766361"} Feb 16 11:21:08 crc kubenswrapper[4949]: I0216 11:21:08.844839 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-7d9d97666-94s4k" Feb 16 11:21:08 crc kubenswrapper[4949]: I0216 11:21:08.845304 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-7d9d97666-94s4k" Feb 16 11:21:08 crc kubenswrapper[4949]: I0216 11:21:08.847952 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-7d9d97666-nk5qs" event={"ID":"46c6e5be-0462-40bf-ab5e-f052cb9163b6","Type":"ContainerStarted","Data":"154e3121a45a99b87f3275aa6aead11867e12bf6f7a248769b0bf2392760160f"} Feb 16 11:21:08 crc kubenswrapper[4949]: I0216 11:21:08.849178 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-7d9d97666-nk5qs" Feb 16 11:21:08 crc kubenswrapper[4949]: I0216 11:21:08.849216 4949 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-7d9d97666-nk5qs" Feb 16 11:21:08 crc kubenswrapper[4949]: I0216 11:21:08.858321 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-7d9d97666-94s4k" Feb 16 11:21:08 crc kubenswrapper[4949]: I0216 11:21:08.861380 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-7d9d97666-94s4k" Feb 16 11:21:08 crc kubenswrapper[4949]: I0216 11:21:08.862276 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-7d9d97666-nk5qs" Feb 16 11:21:08 crc kubenswrapper[4949]: I0216 11:21:08.865238 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-7d9d97666-nk5qs" Feb 16 11:21:08 crc kubenswrapper[4949]: I0216 11:21:08.873243 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-gateway-7d9d97666-94s4k" podStartSLOduration=3.320928056 podStartE2EDuration="11.873229669s" podCreationTimestamp="2026-02-16 11:20:57 +0000 UTC" firstStartedPulling="2026-02-16 11:20:59.444594263 +0000 UTC m=+849.073928418" lastFinishedPulling="2026-02-16 11:21:07.996895866 +0000 UTC m=+857.626230031" observedRunningTime="2026-02-16 11:21:08.868231928 +0000 UTC m=+858.497566093" watchObservedRunningTime="2026-02-16 11:21:08.873229669 +0000 UTC m=+858.502563834" Feb 16 11:21:08 crc kubenswrapper[4949]: I0216 11:21:08.899727 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-gateway-7d9d97666-nk5qs" podStartSLOduration=3.636953797 podStartE2EDuration="11.899711438s" podCreationTimestamp="2026-02-16 11:20:57 +0000 UTC" firstStartedPulling="2026-02-16 11:20:59.740270529 +0000 UTC m=+849.369604694" lastFinishedPulling="2026-02-16 11:21:08.00302817 +0000 UTC m=+857.632362335" observedRunningTime="2026-02-16 11:21:08.893194664 +0000 UTC m=+858.522528839" watchObservedRunningTime="2026-02-16 11:21:08.899711438 +0000 UTC m=+858.529045603" Feb 16 11:21:19 crc kubenswrapper[4949]: I0216 11:21:19.352187 4949 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: this instance owns no tokens Feb 16 11:21:19 crc kubenswrapper[4949]: I0216 11:21:19.353191 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="d60af4cc-ac63-40af-8a3a-2933fb25ffc2" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 16 11:21:19 crc kubenswrapper[4949]: I0216 11:21:19.459903 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-compactor-0" Feb 16 11:21:19 crc kubenswrapper[4949]: I0216 11:21:19.548018 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-index-gateway-0" Feb 16 11:21:27 crc kubenswrapper[4949]: I0216 11:21:27.871489 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-distributor-5d5548c9f5-dl75n" Feb 16 11:21:28 crc kubenswrapper[4949]: I0216 11:21:28.023768 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-logging/logging-loki-querier-76bf7b6d45-gcct7" Feb 16 11:21:28 crc kubenswrapper[4949]: I0216 11:21:28.163398 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-query-frontend-6d6859c548-8h6lw" Feb 16 11:21:29 crc kubenswrapper[4949]: I0216 11:21:29.350056 4949 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: this instance owns no tokens Feb 16 11:21:29 crc kubenswrapper[4949]: I0216 11:21:29.350482 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="d60af4cc-ac63-40af-8a3a-2933fb25ffc2" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 16 11:21:30 crc kubenswrapper[4949]: I0216 11:21:30.621118 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-jr6k4"] Feb 16 11:21:30 crc kubenswrapper[4949]: I0216 11:21:30.623456 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jr6k4" Feb 16 11:21:30 crc kubenswrapper[4949]: I0216 11:21:30.635637 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jr6k4"] Feb 16 11:21:30 crc kubenswrapper[4949]: I0216 11:21:30.660776 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nsk24\" (UniqueName: \"kubernetes.io/projected/9a98e119-721e-4977-a450-a6b9d6b1e431-kube-api-access-nsk24\") pod \"community-operators-jr6k4\" (UID: \"9a98e119-721e-4977-a450-a6b9d6b1e431\") " pod="openshift-marketplace/community-operators-jr6k4" Feb 16 11:21:30 crc kubenswrapper[4949]: I0216 11:21:30.661390 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a98e119-721e-4977-a450-a6b9d6b1e431-utilities\") pod \"community-operators-jr6k4\" (UID: \"9a98e119-721e-4977-a450-a6b9d6b1e431\") " pod="openshift-marketplace/community-operators-jr6k4" Feb 16 11:21:30 crc kubenswrapper[4949]: I0216 11:21:30.661628 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a98e119-721e-4977-a450-a6b9d6b1e431-catalog-content\") pod \"community-operators-jr6k4\" (UID: \"9a98e119-721e-4977-a450-a6b9d6b1e431\") " pod="openshift-marketplace/community-operators-jr6k4" Feb 16 11:21:30 crc kubenswrapper[4949]: I0216 11:21:30.764345 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a98e119-721e-4977-a450-a6b9d6b1e431-utilities\") pod \"community-operators-jr6k4\" (UID: \"9a98e119-721e-4977-a450-a6b9d6b1e431\") " pod="openshift-marketplace/community-operators-jr6k4" Feb 16 11:21:30 crc kubenswrapper[4949]: I0216 11:21:30.764502 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a98e119-721e-4977-a450-a6b9d6b1e431-catalog-content\") pod \"community-operators-jr6k4\" (UID: \"9a98e119-721e-4977-a450-a6b9d6b1e431\") " pod="openshift-marketplace/community-operators-jr6k4" Feb 16 11:21:30 crc kubenswrapper[4949]: I0216 11:21:30.764621 4949 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nsk24\" (UniqueName: \"kubernetes.io/projected/9a98e119-721e-4977-a450-a6b9d6b1e431-kube-api-access-nsk24\") pod \"community-operators-jr6k4\" (UID: \"9a98e119-721e-4977-a450-a6b9d6b1e431\") " pod="openshift-marketplace/community-operators-jr6k4" Feb 16 11:21:30 crc kubenswrapper[4949]: I0216 11:21:30.765213 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a98e119-721e-4977-a450-a6b9d6b1e431-catalog-content\") pod \"community-operators-jr6k4\" (UID: \"9a98e119-721e-4977-a450-a6b9d6b1e431\") " pod="openshift-marketplace/community-operators-jr6k4" Feb 16 11:21:30 crc kubenswrapper[4949]: I0216 11:21:30.765169 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a98e119-721e-4977-a450-a6b9d6b1e431-utilities\") pod \"community-operators-jr6k4\" (UID: \"9a98e119-721e-4977-a450-a6b9d6b1e431\") " pod="openshift-marketplace/community-operators-jr6k4" Feb 16 11:21:30 crc kubenswrapper[4949]: I0216 11:21:30.792966 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nsk24\" (UniqueName: \"kubernetes.io/projected/9a98e119-721e-4977-a450-a6b9d6b1e431-kube-api-access-nsk24\") pod \"community-operators-jr6k4\" (UID: \"9a98e119-721e-4977-a450-a6b9d6b1e431\") " pod="openshift-marketplace/community-operators-jr6k4" Feb 16 11:21:30 crc kubenswrapper[4949]: I0216 11:21:30.966430 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jr6k4" Feb 16 11:21:31 crc kubenswrapper[4949]: I0216 11:21:31.288692 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jr6k4"] Feb 16 11:21:32 crc kubenswrapper[4949]: I0216 11:21:32.035048 4949 generic.go:334] "Generic (PLEG): container finished" podID="9a98e119-721e-4977-a450-a6b9d6b1e431" containerID="4747c20335cd4beffa08d6f2226f8f5cc619554881f28b5974b1b829ac761826" exitCode=0 Feb 16 11:21:32 crc kubenswrapper[4949]: I0216 11:21:32.035105 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jr6k4" event={"ID":"9a98e119-721e-4977-a450-a6b9d6b1e431","Type":"ContainerDied","Data":"4747c20335cd4beffa08d6f2226f8f5cc619554881f28b5974b1b829ac761826"} Feb 16 11:21:32 crc kubenswrapper[4949]: I0216 11:21:32.035137 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jr6k4" event={"ID":"9a98e119-721e-4977-a450-a6b9d6b1e431","Type":"ContainerStarted","Data":"a460ce2d7e2494d9f0c70cbd3f28a17ff8d811f64fec48f00e93b599ba65f6a6"} Feb 16 11:21:32 crc kubenswrapper[4949]: I0216 11:21:32.799538 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-dpw2n"] Feb 16 11:21:32 crc kubenswrapper[4949]: I0216 11:21:32.804356 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-dpw2n"
Feb 16 11:21:32 crc kubenswrapper[4949]: I0216 11:21:32.809078 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3431163c-eca9-47ca-93d2-779262d76774-catalog-content\") pod \"certified-operators-dpw2n\" (UID: \"3431163c-eca9-47ca-93d2-779262d76774\") " pod="openshift-marketplace/certified-operators-dpw2n"
Feb 16 11:21:32 crc kubenswrapper[4949]: I0216 11:21:32.809146 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3431163c-eca9-47ca-93d2-779262d76774-utilities\") pod \"certified-operators-dpw2n\" (UID: \"3431163c-eca9-47ca-93d2-779262d76774\") " pod="openshift-marketplace/certified-operators-dpw2n"
Feb 16 11:21:32 crc kubenswrapper[4949]: I0216 11:21:32.809251 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2hvvq\" (UniqueName: \"kubernetes.io/projected/3431163c-eca9-47ca-93d2-779262d76774-kube-api-access-2hvvq\") pod \"certified-operators-dpw2n\" (UID: \"3431163c-eca9-47ca-93d2-779262d76774\") " pod="openshift-marketplace/certified-operators-dpw2n"
Feb 16 11:21:32 crc kubenswrapper[4949]: I0216 11:21:32.837115 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dpw2n"]
Feb 16 11:21:32 crc kubenswrapper[4949]: I0216 11:21:32.911308 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2hvvq\" (UniqueName: \"kubernetes.io/projected/3431163c-eca9-47ca-93d2-779262d76774-kube-api-access-2hvvq\") pod \"certified-operators-dpw2n\" (UID: \"3431163c-eca9-47ca-93d2-779262d76774\") " pod="openshift-marketplace/certified-operators-dpw2n"
Feb 16 11:21:32 crc kubenswrapper[4949]: I0216 11:21:32.911453 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3431163c-eca9-47ca-93d2-779262d76774-catalog-content\") pod \"certified-operators-dpw2n\" (UID: \"3431163c-eca9-47ca-93d2-779262d76774\") " pod="openshift-marketplace/certified-operators-dpw2n"
Feb 16 11:21:32 crc kubenswrapper[4949]: I0216 11:21:32.911487 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3431163c-eca9-47ca-93d2-779262d76774-utilities\") pod \"certified-operators-dpw2n\" (UID: \"3431163c-eca9-47ca-93d2-779262d76774\") " pod="openshift-marketplace/certified-operators-dpw2n"
Feb 16 11:21:32 crc kubenswrapper[4949]: I0216 11:21:32.912044 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3431163c-eca9-47ca-93d2-779262d76774-utilities\") pod \"certified-operators-dpw2n\" (UID: \"3431163c-eca9-47ca-93d2-779262d76774\") " pod="openshift-marketplace/certified-operators-dpw2n"
Feb 16 11:21:32 crc kubenswrapper[4949]: I0216 11:21:32.912443 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3431163c-eca9-47ca-93d2-779262d76774-catalog-content\") pod \"certified-operators-dpw2n\" (UID: \"3431163c-eca9-47ca-93d2-779262d76774\") " pod="openshift-marketplace/certified-operators-dpw2n"
Feb 16 11:21:32 crc kubenswrapper[4949]: I0216 11:21:32.934835 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2hvvq\" (UniqueName: \"kubernetes.io/projected/3431163c-eca9-47ca-93d2-779262d76774-kube-api-access-2hvvq\") pod \"certified-operators-dpw2n\" (UID: \"3431163c-eca9-47ca-93d2-779262d76774\") " pod="openshift-marketplace/certified-operators-dpw2n"
Feb 16 11:21:33 crc kubenswrapper[4949]: I0216 11:21:33.043356 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jr6k4" event={"ID":"9a98e119-721e-4977-a450-a6b9d6b1e431","Type":"ContainerStarted","Data":"ff8c4b7105acd05f3e26490bcb46a0847e3ffa3b2a225648b05255d92491aded"}
Feb 16 11:21:33 crc kubenswrapper[4949]: I0216 11:21:33.198397 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dpw2n"
Feb 16 11:21:33 crc kubenswrapper[4949]: I0216 11:21:33.546234 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dpw2n"]
Feb 16 11:21:34 crc kubenswrapper[4949]: I0216 11:21:34.057354 4949 generic.go:334] "Generic (PLEG): container finished" podID="3431163c-eca9-47ca-93d2-779262d76774" containerID="f57b0e8102adbc9d80ad806e5a0a764679c85841e695852d01e30dfa22c48565" exitCode=0
Feb 16 11:21:34 crc kubenswrapper[4949]: I0216 11:21:34.057455 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dpw2n" event={"ID":"3431163c-eca9-47ca-93d2-779262d76774","Type":"ContainerDied","Data":"f57b0e8102adbc9d80ad806e5a0a764679c85841e695852d01e30dfa22c48565"}
Feb 16 11:21:34 crc kubenswrapper[4949]: I0216 11:21:34.057511 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dpw2n" event={"ID":"3431163c-eca9-47ca-93d2-779262d76774","Type":"ContainerStarted","Data":"39cdcb1e2d77933de35e00092754a0a3e19498c453e5e9db40cd752a6486a447"}
Feb 16 11:21:34 crc kubenswrapper[4949]: I0216 11:21:34.060867 4949 generic.go:334] "Generic (PLEG): container finished" podID="9a98e119-721e-4977-a450-a6b9d6b1e431" containerID="ff8c4b7105acd05f3e26490bcb46a0847e3ffa3b2a225648b05255d92491aded" exitCode=0
Feb 16 11:21:34 crc kubenswrapper[4949]: I0216 11:21:34.060942 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jr6k4" event={"ID":"9a98e119-721e-4977-a450-a6b9d6b1e431","Type":"ContainerDied","Data":"ff8c4b7105acd05f3e26490bcb46a0847e3ffa3b2a225648b05255d92491aded"}
Feb 16 11:21:35 crc kubenswrapper[4949]: I0216 11:21:35.080098 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jr6k4" event={"ID":"9a98e119-721e-4977-a450-a6b9d6b1e431","Type":"ContainerStarted","Data":"3f32e6e8d637b808d767b3fbd5d74ef458ae11d67b334acb4bbfe703f70b93ac"}
Feb 16 11:21:35 crc kubenswrapper[4949]: I0216 11:21:35.083096 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dpw2n" event={"ID":"3431163c-eca9-47ca-93d2-779262d76774","Type":"ContainerStarted","Data":"c633158ab22b24ad4df5a61467d409001c2067d7ce96825f01880907f8ea4fe6"}
Feb 16 11:21:35 crc kubenswrapper[4949]: I0216 11:21:35.104122 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-jr6k4" podStartSLOduration=2.578726369 podStartE2EDuration="5.104094178s" podCreationTimestamp="2026-02-16 11:21:30 +0000 UTC" firstStartedPulling="2026-02-16 11:21:32.039529044 +0000 UTC m=+881.668863209" lastFinishedPulling="2026-02-16 11:21:34.564896853 +0000 UTC m=+884.194231018" observedRunningTime="2026-02-16 11:21:35.100953639 +0000 UTC m=+884.730287804" watchObservedRunningTime="2026-02-16 11:21:35.104094178 +0000 UTC m=+884.733428343"
Feb 16 11:21:36 crc kubenswrapper[4949]: I0216 11:21:36.099313 4949 generic.go:334] "Generic (PLEG): container finished" podID="3431163c-eca9-47ca-93d2-779262d76774" containerID="c633158ab22b24ad4df5a61467d409001c2067d7ce96825f01880907f8ea4fe6" exitCode=0
Feb 16 11:21:36 crc kubenswrapper[4949]: I0216 11:21:36.099383 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dpw2n" event={"ID":"3431163c-eca9-47ca-93d2-779262d76774","Type":"ContainerDied","Data":"c633158ab22b24ad4df5a61467d409001c2067d7ce96825f01880907f8ea4fe6"}
Feb 16 11:21:37 crc kubenswrapper[4949]: I0216 11:21:37.107440 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dpw2n" event={"ID":"3431163c-eca9-47ca-93d2-779262d76774","Type":"ContainerStarted","Data":"76b2a61d55fc2226b46793bfd76ad876f3ab481f06c28a045127120202d64028"}
Feb 16 11:21:37 crc kubenswrapper[4949]: I0216 11:21:37.129751 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-dpw2n" podStartSLOduration=2.7072689199999997 podStartE2EDuration="5.129732868s" podCreationTimestamp="2026-02-16 11:21:32 +0000 UTC" firstStartedPulling="2026-02-16 11:21:34.059557775 +0000 UTC m=+883.688891940" lastFinishedPulling="2026-02-16 11:21:36.482021683 +0000 UTC m=+886.111355888" observedRunningTime="2026-02-16 11:21:37.124913811 +0000 UTC m=+886.754247976" watchObservedRunningTime="2026-02-16 11:21:37.129732868 +0000 UTC m=+886.759067053"
Feb 16 11:21:39 crc kubenswrapper[4949]: I0216 11:21:39.354880 4949 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: waiting for 15s after being ready
Feb 16 11:21:39 crc kubenswrapper[4949]: I0216 11:21:39.355692 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="d60af4cc-ac63-40af-8a3a-2933fb25ffc2" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503"
Feb 16 11:21:40 crc kubenswrapper[4949]: I0216 11:21:40.966648 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-jr6k4"
Feb 16 11:21:40 crc kubenswrapper[4949]: I0216 11:21:40.966704 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-jr6k4"
Feb 16 11:21:41 crc kubenswrapper[4949]: I0216 11:21:41.009900 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-jr6k4"
Feb 16 11:21:41 crc kubenswrapper[4949]: I0216 11:21:41.191459 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-94t82"]
Feb 16 11:21:41 crc kubenswrapper[4949]: I0216 11:21:41.194016 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-94t82"
Feb 16 11:21:41 crc kubenswrapper[4949]: I0216 11:21:41.209694 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-94t82"]
Feb 16 11:21:41 crc kubenswrapper[4949]: I0216 11:21:41.215774 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-jr6k4"
Feb 16 11:21:41 crc kubenswrapper[4949]: I0216 11:21:41.274299 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rnb5f\" (UniqueName: \"kubernetes.io/projected/850c4c53-3a80-462d-8e41-8c57dc474d00-kube-api-access-rnb5f\") pod \"redhat-marketplace-94t82\" (UID: \"850c4c53-3a80-462d-8e41-8c57dc474d00\") " pod="openshift-marketplace/redhat-marketplace-94t82"
Feb 16 11:21:41 crc kubenswrapper[4949]: I0216 11:21:41.274374 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/850c4c53-3a80-462d-8e41-8c57dc474d00-utilities\") pod \"redhat-marketplace-94t82\" (UID: \"850c4c53-3a80-462d-8e41-8c57dc474d00\") " pod="openshift-marketplace/redhat-marketplace-94t82"
Feb 16 11:21:41 crc kubenswrapper[4949]: I0216 11:21:41.274431 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/850c4c53-3a80-462d-8e41-8c57dc474d00-catalog-content\") pod \"redhat-marketplace-94t82\" (UID: \"850c4c53-3a80-462d-8e41-8c57dc474d00\") " pod="openshift-marketplace/redhat-marketplace-94t82"
Feb 16 11:21:41 crc kubenswrapper[4949]: I0216 11:21:41.375708 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rnb5f\" (UniqueName: \"kubernetes.io/projected/850c4c53-3a80-462d-8e41-8c57dc474d00-kube-api-access-rnb5f\") pod \"redhat-marketplace-94t82\" (UID: \"850c4c53-3a80-462d-8e41-8c57dc474d00\") " pod="openshift-marketplace/redhat-marketplace-94t82"
Feb 16 11:21:41 crc kubenswrapper[4949]: I0216 11:21:41.375791 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/850c4c53-3a80-462d-8e41-8c57dc474d00-utilities\") pod \"redhat-marketplace-94t82\" (UID: \"850c4c53-3a80-462d-8e41-8c57dc474d00\") " pod="openshift-marketplace/redhat-marketplace-94t82"
Feb 16 11:21:41 crc kubenswrapper[4949]: I0216 11:21:41.375858 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/850c4c53-3a80-462d-8e41-8c57dc474d00-catalog-content\") pod \"redhat-marketplace-94t82\" (UID: \"850c4c53-3a80-462d-8e41-8c57dc474d00\") " pod="openshift-marketplace/redhat-marketplace-94t82"
Feb 16 11:21:41 crc kubenswrapper[4949]: I0216 11:21:41.376458 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/850c4c53-3a80-462d-8e41-8c57dc474d00-catalog-content\") pod \"redhat-marketplace-94t82\" (UID: \"850c4c53-3a80-462d-8e41-8c57dc474d00\") " pod="openshift-marketplace/redhat-marketplace-94t82"
Feb 16 11:21:41 crc kubenswrapper[4949]: I0216 11:21:41.376546 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/850c4c53-3a80-462d-8e41-8c57dc474d00-utilities\") pod \"redhat-marketplace-94t82\" (UID: \"850c4c53-3a80-462d-8e41-8c57dc474d00\") " pod="openshift-marketplace/redhat-marketplace-94t82"
Feb 16 11:21:41 crc kubenswrapper[4949]: I0216 11:21:41.403325 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rnb5f\" (UniqueName: \"kubernetes.io/projected/850c4c53-3a80-462d-8e41-8c57dc474d00-kube-api-access-rnb5f\") pod \"redhat-marketplace-94t82\" (UID: \"850c4c53-3a80-462d-8e41-8c57dc474d00\") " pod="openshift-marketplace/redhat-marketplace-94t82"
Feb 16 11:21:41 crc kubenswrapper[4949]: I0216 11:21:41.514194 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-94t82"
Feb 16 11:21:41 crc kubenswrapper[4949]: I0216 11:21:41.985211 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-94t82"]
Feb 16 11:21:42 crc kubenswrapper[4949]: I0216 11:21:42.155970 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-94t82" event={"ID":"850c4c53-3a80-462d-8e41-8c57dc474d00","Type":"ContainerStarted","Data":"10d13a5e404ce92f73e825cdec7ebc89946e69f8b787afec466242623b2a0a82"}
Feb 16 11:21:42 crc kubenswrapper[4949]: E0216 11:21:42.340367 4949 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod850c4c53_3a80_462d_8e41_8c57dc474d00.slice/crio-conmon-e372d1ca959b7638eba21314ba46dc95b7a1cb9c3b00a593a01531770cdab45f.scope\": RecentStats: unable to find data in memory cache]"
Feb 16 11:21:43 crc kubenswrapper[4949]: I0216 11:21:43.164714 4949 generic.go:334] "Generic (PLEG): container finished" podID="850c4c53-3a80-462d-8e41-8c57dc474d00" containerID="e372d1ca959b7638eba21314ba46dc95b7a1cb9c3b00a593a01531770cdab45f" exitCode=0
Feb 16 11:21:43 crc kubenswrapper[4949]: I0216 11:21:43.164835 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-94t82" event={"ID":"850c4c53-3a80-462d-8e41-8c57dc474d00","Type":"ContainerDied","Data":"e372d1ca959b7638eba21314ba46dc95b7a1cb9c3b00a593a01531770cdab45f"}
Feb 16 11:21:43 crc kubenswrapper[4949]: I0216 11:21:43.199455 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-dpw2n"
Feb 16 11:21:43 crc kubenswrapper[4949]: I0216 11:21:43.199525 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-dpw2n"
Feb 16 11:21:43 crc kubenswrapper[4949]: I0216 11:21:43.245325 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-dpw2n"
Feb 16 11:21:43 crc kubenswrapper[4949]: I0216 11:21:43.385475 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jr6k4"]
Feb 16 11:21:43 crc kubenswrapper[4949]: I0216 11:21:43.385829 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-jr6k4" podUID="9a98e119-721e-4977-a450-a6b9d6b1e431" containerName="registry-server" containerID="cri-o://3f32e6e8d637b808d767b3fbd5d74ef458ae11d67b334acb4bbfe703f70b93ac" gracePeriod=2
Feb 16 11:21:43 crc kubenswrapper[4949]: I0216 11:21:43.835571 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jr6k4"
Feb 16 11:21:43 crc kubenswrapper[4949]: I0216 11:21:43.920943 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a98e119-721e-4977-a450-a6b9d6b1e431-utilities\") pod \"9a98e119-721e-4977-a450-a6b9d6b1e431\" (UID: \"9a98e119-721e-4977-a450-a6b9d6b1e431\") "
Feb 16 11:21:43 crc kubenswrapper[4949]: I0216 11:21:43.921092 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a98e119-721e-4977-a450-a6b9d6b1e431-catalog-content\") pod \"9a98e119-721e-4977-a450-a6b9d6b1e431\" (UID: \"9a98e119-721e-4977-a450-a6b9d6b1e431\") "
Feb 16 11:21:43 crc kubenswrapper[4949]: I0216 11:21:43.921228 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nsk24\" (UniqueName: \"kubernetes.io/projected/9a98e119-721e-4977-a450-a6b9d6b1e431-kube-api-access-nsk24\") pod \"9a98e119-721e-4977-a450-a6b9d6b1e431\" (UID: \"9a98e119-721e-4977-a450-a6b9d6b1e431\") "
Feb 16 11:21:43 crc kubenswrapper[4949]: I0216 11:21:43.922657 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9a98e119-721e-4977-a450-a6b9d6b1e431-utilities" (OuterVolumeSpecName: "utilities") pod "9a98e119-721e-4977-a450-a6b9d6b1e431" (UID: "9a98e119-721e-4977-a450-a6b9d6b1e431"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Feb 16 11:21:43 crc kubenswrapper[4949]: I0216 11:21:43.927310 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9a98e119-721e-4977-a450-a6b9d6b1e431-kube-api-access-nsk24" (OuterVolumeSpecName: "kube-api-access-nsk24") pod "9a98e119-721e-4977-a450-a6b9d6b1e431" (UID: "9a98e119-721e-4977-a450-a6b9d6b1e431"). InnerVolumeSpecName "kube-api-access-nsk24". PluginName "kubernetes.io/projected", VolumeGidValue ""
Feb 16 11:21:44 crc kubenswrapper[4949]: I0216 11:21:44.024786 4949 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a98e119-721e-4977-a450-a6b9d6b1e431-utilities\") on node \"crc\" DevicePath \"\""
Feb 16 11:21:44 crc kubenswrapper[4949]: I0216 11:21:44.024850 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nsk24\" (UniqueName: \"kubernetes.io/projected/9a98e119-721e-4977-a450-a6b9d6b1e431-kube-api-access-nsk24\") on node \"crc\" DevicePath \"\""
Feb 16 11:21:44 crc kubenswrapper[4949]: I0216 11:21:44.178492 4949 generic.go:334] "Generic (PLEG): container finished" podID="9a98e119-721e-4977-a450-a6b9d6b1e431" containerID="3f32e6e8d637b808d767b3fbd5d74ef458ae11d67b334acb4bbfe703f70b93ac" exitCode=0
Feb 16 11:21:44 crc kubenswrapper[4949]: I0216 11:21:44.178561 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jr6k4"
Feb 16 11:21:44 crc kubenswrapper[4949]: I0216 11:21:44.178560 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jr6k4" event={"ID":"9a98e119-721e-4977-a450-a6b9d6b1e431","Type":"ContainerDied","Data":"3f32e6e8d637b808d767b3fbd5d74ef458ae11d67b334acb4bbfe703f70b93ac"}
Feb 16 11:21:44 crc kubenswrapper[4949]: I0216 11:21:44.178707 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jr6k4" event={"ID":"9a98e119-721e-4977-a450-a6b9d6b1e431","Type":"ContainerDied","Data":"a460ce2d7e2494d9f0c70cbd3f28a17ff8d811f64fec48f00e93b599ba65f6a6"}
Feb 16 11:21:44 crc kubenswrapper[4949]: I0216 11:21:44.178754 4949 scope.go:117] "RemoveContainer" containerID="3f32e6e8d637b808d767b3fbd5d74ef458ae11d67b334acb4bbfe703f70b93ac"
Feb 16 11:21:44 crc kubenswrapper[4949]: I0216 11:21:44.181217 4949 generic.go:334] "Generic (PLEG): container finished" podID="850c4c53-3a80-462d-8e41-8c57dc474d00" containerID="9dc1dfb62528c898700392ea90487f0d535acbd0c45a2bf719b7cd7f3239341b" exitCode=0
Feb 16 11:21:44 crc kubenswrapper[4949]: I0216 11:21:44.181277 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-94t82" event={"ID":"850c4c53-3a80-462d-8e41-8c57dc474d00","Type":"ContainerDied","Data":"9dc1dfb62528c898700392ea90487f0d535acbd0c45a2bf719b7cd7f3239341b"}
Feb 16 11:21:44 crc kubenswrapper[4949]: I0216 11:21:44.198323 4949 scope.go:117] "RemoveContainer" containerID="ff8c4b7105acd05f3e26490bcb46a0847e3ffa3b2a225648b05255d92491aded"
Feb 16 11:21:44 crc kubenswrapper[4949]: I0216 11:21:44.208943 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9a98e119-721e-4977-a450-a6b9d6b1e431-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9a98e119-721e-4977-a450-a6b9d6b1e431" (UID: "9a98e119-721e-4977-a450-a6b9d6b1e431"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Feb 16 11:21:44 crc kubenswrapper[4949]: I0216 11:21:44.222720 4949 scope.go:117] "RemoveContainer" containerID="4747c20335cd4beffa08d6f2226f8f5cc619554881f28b5974b1b829ac761826"
Feb 16 11:21:44 crc kubenswrapper[4949]: I0216 11:21:44.226414 4949 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a98e119-721e-4977-a450-a6b9d6b1e431-catalog-content\") on node \"crc\" DevicePath \"\""
Feb 16 11:21:44 crc kubenswrapper[4949]: I0216 11:21:44.235151 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-dpw2n"
Feb 16 11:21:44 crc kubenswrapper[4949]: I0216 11:21:44.259456 4949 scope.go:117] "RemoveContainer" containerID="3f32e6e8d637b808d767b3fbd5d74ef458ae11d67b334acb4bbfe703f70b93ac"
Feb 16 11:21:44 crc kubenswrapper[4949]: E0216 11:21:44.260017 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3f32e6e8d637b808d767b3fbd5d74ef458ae11d67b334acb4bbfe703f70b93ac\": container with ID starting with 3f32e6e8d637b808d767b3fbd5d74ef458ae11d67b334acb4bbfe703f70b93ac not found: ID does not exist" containerID="3f32e6e8d637b808d767b3fbd5d74ef458ae11d67b334acb4bbfe703f70b93ac"
Feb 16 11:21:44 crc kubenswrapper[4949]: I0216 11:21:44.260065 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3f32e6e8d637b808d767b3fbd5d74ef458ae11d67b334acb4bbfe703f70b93ac"} err="failed to get container status \"3f32e6e8d637b808d767b3fbd5d74ef458ae11d67b334acb4bbfe703f70b93ac\": rpc error: code = NotFound desc = could not find container \"3f32e6e8d637b808d767b3fbd5d74ef458ae11d67b334acb4bbfe703f70b93ac\": container with ID starting with 3f32e6e8d637b808d767b3fbd5d74ef458ae11d67b334acb4bbfe703f70b93ac not found: ID does not exist"
Feb 16 11:21:44 crc kubenswrapper[4949]: I0216 11:21:44.260094 4949 scope.go:117] "RemoveContainer" containerID="ff8c4b7105acd05f3e26490bcb46a0847e3ffa3b2a225648b05255d92491aded"
Feb 16 11:21:44 crc kubenswrapper[4949]: E0216 11:21:44.260586 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ff8c4b7105acd05f3e26490bcb46a0847e3ffa3b2a225648b05255d92491aded\": container with ID starting with ff8c4b7105acd05f3e26490bcb46a0847e3ffa3b2a225648b05255d92491aded not found: ID does not exist" containerID="ff8c4b7105acd05f3e26490bcb46a0847e3ffa3b2a225648b05255d92491aded"
Feb 16 11:21:44 crc kubenswrapper[4949]: I0216 11:21:44.260609 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff8c4b7105acd05f3e26490bcb46a0847e3ffa3b2a225648b05255d92491aded"} err="failed to get container status \"ff8c4b7105acd05f3e26490bcb46a0847e3ffa3b2a225648b05255d92491aded\": rpc error: code = NotFound desc = could not find container \"ff8c4b7105acd05f3e26490bcb46a0847e3ffa3b2a225648b05255d92491aded\": container with ID starting with ff8c4b7105acd05f3e26490bcb46a0847e3ffa3b2a225648b05255d92491aded not found: ID does not exist"
Feb 16 11:21:44 crc kubenswrapper[4949]: I0216 11:21:44.260622 4949 scope.go:117] "RemoveContainer" containerID="4747c20335cd4beffa08d6f2226f8f5cc619554881f28b5974b1b829ac761826"
Feb 16 11:21:44 crc kubenswrapper[4949]: E0216 11:21:44.260988 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4747c20335cd4beffa08d6f2226f8f5cc619554881f28b5974b1b829ac761826\": container with ID starting with 4747c20335cd4beffa08d6f2226f8f5cc619554881f28b5974b1b829ac761826 not found: ID does not exist" containerID="4747c20335cd4beffa08d6f2226f8f5cc619554881f28b5974b1b829ac761826"
Feb 16 11:21:44 crc kubenswrapper[4949]: I0216 11:21:44.261020 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4747c20335cd4beffa08d6f2226f8f5cc619554881f28b5974b1b829ac761826"} err="failed to get container status \"4747c20335cd4beffa08d6f2226f8f5cc619554881f28b5974b1b829ac761826\": rpc error: code = NotFound desc = could not find container \"4747c20335cd4beffa08d6f2226f8f5cc619554881f28b5974b1b829ac761826\": container with ID starting with 4747c20335cd4beffa08d6f2226f8f5cc619554881f28b5974b1b829ac761826 not found: ID does not exist"
Feb 16 11:21:44 crc kubenswrapper[4949]: I0216 11:21:44.513663 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jr6k4"]
Feb 16 11:21:44 crc kubenswrapper[4949]: I0216 11:21:44.521065 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-jr6k4"]
Feb 16 11:21:45 crc kubenswrapper[4949]: I0216 11:21:45.195880 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-94t82" event={"ID":"850c4c53-3a80-462d-8e41-8c57dc474d00","Type":"ContainerStarted","Data":"9a59ae0b2c58d2d4552305b3786788c16929a4457495d54852cc54be202fec50"}
Feb 16 11:21:45 crc kubenswrapper[4949]: I0216 11:21:45.218842 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-94t82" podStartSLOduration=2.810323697 podStartE2EDuration="4.218823695s" podCreationTimestamp="2026-02-16 11:21:41 +0000 UTC" firstStartedPulling="2026-02-16 11:21:43.167330135 +0000 UTC m=+892.796664300" lastFinishedPulling="2026-02-16 11:21:44.575830133 +0000 UTC m=+894.205164298" observedRunningTime="2026-02-16 11:21:45.215862192 +0000 UTC m=+894.845196367" watchObservedRunningTime="2026-02-16 11:21:45.218823695 +0000 UTC m=+894.848157860"
Feb 16 11:21:45 crc kubenswrapper[4949]: I0216 11:21:45.244964 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9a98e119-721e-4977-a450-a6b9d6b1e431" path="/var/lib/kubelet/pods/9a98e119-721e-4977-a450-a6b9d6b1e431/volumes"
Feb 16 11:21:45 crc kubenswrapper[4949]: I0216 11:21:45.783881 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dpw2n"]
Feb 16 11:21:46 crc kubenswrapper[4949]: I0216 11:21:46.205627 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-dpw2n" podUID="3431163c-eca9-47ca-93d2-779262d76774" containerName="registry-server" containerID="cri-o://76b2a61d55fc2226b46793bfd76ad876f3ab481f06c28a045127120202d64028" gracePeriod=2
Feb 16 11:21:46 crc kubenswrapper[4949]: I0216 11:21:46.653490 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dpw2n"
Feb 16 11:21:46 crc kubenswrapper[4949]: I0216 11:21:46.663422 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3431163c-eca9-47ca-93d2-779262d76774-catalog-content\") pod \"3431163c-eca9-47ca-93d2-779262d76774\" (UID: \"3431163c-eca9-47ca-93d2-779262d76774\") "
Feb 16 11:21:46 crc kubenswrapper[4949]: I0216 11:21:46.663555 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2hvvq\" (UniqueName: \"kubernetes.io/projected/3431163c-eca9-47ca-93d2-779262d76774-kube-api-access-2hvvq\") pod \"3431163c-eca9-47ca-93d2-779262d76774\" (UID: \"3431163c-eca9-47ca-93d2-779262d76774\") "
Feb 16 11:21:46 crc kubenswrapper[4949]: I0216 11:21:46.664643 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3431163c-eca9-47ca-93d2-779262d76774-utilities\") pod \"3431163c-eca9-47ca-93d2-779262d76774\" (UID: \"3431163c-eca9-47ca-93d2-779262d76774\") "
Feb 16 11:21:46 crc kubenswrapper[4949]: I0216 11:21:46.667266 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3431163c-eca9-47ca-93d2-779262d76774-utilities" (OuterVolumeSpecName: "utilities") pod "3431163c-eca9-47ca-93d2-779262d76774" (UID: "3431163c-eca9-47ca-93d2-779262d76774"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Feb 16 11:21:46 crc kubenswrapper[4949]: I0216 11:21:46.671718 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3431163c-eca9-47ca-93d2-779262d76774-kube-api-access-2hvvq" (OuterVolumeSpecName: "kube-api-access-2hvvq") pod "3431163c-eca9-47ca-93d2-779262d76774" (UID: "3431163c-eca9-47ca-93d2-779262d76774"). InnerVolumeSpecName "kube-api-access-2hvvq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Feb 16 11:21:46 crc kubenswrapper[4949]: I0216 11:21:46.728269 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3431163c-eca9-47ca-93d2-779262d76774-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3431163c-eca9-47ca-93d2-779262d76774" (UID: "3431163c-eca9-47ca-93d2-779262d76774"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Feb 16 11:21:46 crc kubenswrapper[4949]: I0216 11:21:46.766524 4949 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3431163c-eca9-47ca-93d2-779262d76774-catalog-content\") on node \"crc\" DevicePath \"\""
Feb 16 11:21:46 crc kubenswrapper[4949]: I0216 11:21:46.766575 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2hvvq\" (UniqueName: \"kubernetes.io/projected/3431163c-eca9-47ca-93d2-779262d76774-kube-api-access-2hvvq\") on node \"crc\" DevicePath \"\""
Feb 16 11:21:46 crc kubenswrapper[4949]: I0216 11:21:46.766597 4949 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3431163c-eca9-47ca-93d2-779262d76774-utilities\") on node \"crc\" DevicePath \"\""
Feb 16 11:21:47 crc kubenswrapper[4949]: I0216 11:21:47.214958 4949 generic.go:334] "Generic (PLEG): container finished" podID="3431163c-eca9-47ca-93d2-779262d76774" containerID="76b2a61d55fc2226b46793bfd76ad876f3ab481f06c28a045127120202d64028" exitCode=0
Feb 16 11:21:47 crc kubenswrapper[4949]: I0216 11:21:47.215007 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dpw2n" event={"ID":"3431163c-eca9-47ca-93d2-779262d76774","Type":"ContainerDied","Data":"76b2a61d55fc2226b46793bfd76ad876f3ab481f06c28a045127120202d64028"}
Feb 16 11:21:47 crc kubenswrapper[4949]: I0216 11:21:47.215038 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dpw2n" event={"ID":"3431163c-eca9-47ca-93d2-779262d76774","Type":"ContainerDied","Data":"39cdcb1e2d77933de35e00092754a0a3e19498c453e5e9db40cd752a6486a447"}
Feb 16 11:21:47 crc kubenswrapper[4949]: I0216 11:21:47.215048 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dpw2n"
Feb 16 11:21:47 crc kubenswrapper[4949]: I0216 11:21:47.215069 4949 scope.go:117] "RemoveContainer" containerID="76b2a61d55fc2226b46793bfd76ad876f3ab481f06c28a045127120202d64028"
Feb 16 11:21:47 crc kubenswrapper[4949]: I0216 11:21:47.243208 4949 scope.go:117] "RemoveContainer" containerID="c633158ab22b24ad4df5a61467d409001c2067d7ce96825f01880907f8ea4fe6"
Feb 16 11:21:47 crc kubenswrapper[4949]: I0216 11:21:47.267022 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dpw2n"]
Feb 16 11:21:47 crc kubenswrapper[4949]: I0216 11:21:47.277039 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-dpw2n"]
Feb 16 11:21:47 crc kubenswrapper[4949]: I0216 11:21:47.279570 4949 scope.go:117] "RemoveContainer" containerID="f57b0e8102adbc9d80ad806e5a0a764679c85841e695852d01e30dfa22c48565"
Feb 16 11:21:47 crc kubenswrapper[4949]: I0216 11:21:47.301709 4949 scope.go:117] "RemoveContainer" containerID="76b2a61d55fc2226b46793bfd76ad876f3ab481f06c28a045127120202d64028"
Feb 16 11:21:47 crc kubenswrapper[4949]: E0216 11:21:47.302256 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"76b2a61d55fc2226b46793bfd76ad876f3ab481f06c28a045127120202d64028\": container with ID starting with 76b2a61d55fc2226b46793bfd76ad876f3ab481f06c28a045127120202d64028 not found: ID does not exist" containerID="76b2a61d55fc2226b46793bfd76ad876f3ab481f06c28a045127120202d64028"
Feb 16 11:21:47 crc kubenswrapper[4949]: I0216 11:21:47.302293 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"76b2a61d55fc2226b46793bfd76ad876f3ab481f06c28a045127120202d64028"} err="failed to get container status \"76b2a61d55fc2226b46793bfd76ad876f3ab481f06c28a045127120202d64028\": rpc error: code = NotFound desc = could not find container \"76b2a61d55fc2226b46793bfd76ad876f3ab481f06c28a045127120202d64028\": container with ID starting with 76b2a61d55fc2226b46793bfd76ad876f3ab481f06c28a045127120202d64028 not found: ID does not exist"
Feb 16 11:21:47 crc kubenswrapper[4949]: I0216 11:21:47.302319 4949 scope.go:117] "RemoveContainer" containerID="c633158ab22b24ad4df5a61467d409001c2067d7ce96825f01880907f8ea4fe6"
Feb 16 11:21:47 crc kubenswrapper[4949]: E0216 11:21:47.302780 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c633158ab22b24ad4df5a61467d409001c2067d7ce96825f01880907f8ea4fe6\": container with ID starting with c633158ab22b24ad4df5a61467d409001c2067d7ce96825f01880907f8ea4fe6 not found: ID does not exist" containerID="c633158ab22b24ad4df5a61467d409001c2067d7ce96825f01880907f8ea4fe6"
Feb 16 11:21:47 crc kubenswrapper[4949]: I0216 11:21:47.302818 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c633158ab22b24ad4df5a61467d409001c2067d7ce96825f01880907f8ea4fe6"} err="failed to get container status \"c633158ab22b24ad4df5a61467d409001c2067d7ce96825f01880907f8ea4fe6\": rpc error: code = NotFound desc = could not find container \"c633158ab22b24ad4df5a61467d409001c2067d7ce96825f01880907f8ea4fe6\": container with ID starting with c633158ab22b24ad4df5a61467d409001c2067d7ce96825f01880907f8ea4fe6 not found: ID does not exist"
Feb 16 11:21:47 crc kubenswrapper[4949]: I0216 11:21:47.302845 4949 scope.go:117] "RemoveContainer" containerID="f57b0e8102adbc9d80ad806e5a0a764679c85841e695852d01e30dfa22c48565"
Feb 16 11:21:47 crc kubenswrapper[4949]: E0216 11:21:47.303505 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f57b0e8102adbc9d80ad806e5a0a764679c85841e695852d01e30dfa22c48565\": container with ID starting with f57b0e8102adbc9d80ad806e5a0a764679c85841e695852d01e30dfa22c48565 not found: ID does not exist" containerID="f57b0e8102adbc9d80ad806e5a0a764679c85841e695852d01e30dfa22c48565"
Feb 16 11:21:47 crc kubenswrapper[4949]: I0216 11:21:47.303530 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f57b0e8102adbc9d80ad806e5a0a764679c85841e695852d01e30dfa22c48565"} err="failed to get container status \"f57b0e8102adbc9d80ad806e5a0a764679c85841e695852d01e30dfa22c48565\": rpc error: code = NotFound desc = could not find container \"f57b0e8102adbc9d80ad806e5a0a764679c85841e695852d01e30dfa22c48565\": container with ID starting with f57b0e8102adbc9d80ad806e5a0a764679c85841e695852d01e30dfa22c48565 not found: ID does not exist"
Feb 16 11:21:49 crc kubenswrapper[4949]: I0216 11:21:49.279626 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3431163c-eca9-47ca-93d2-779262d76774" path="/var/lib/kubelet/pods/3431163c-eca9-47ca-93d2-779262d76774/volumes"
Feb 16 11:21:49 crc kubenswrapper[4949]: I0216 11:21:49.350265 4949 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: waiting for 15s after being ready
Feb 16 11:21:49 crc kubenswrapper[4949]: I0216 11:21:49.350316 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="d60af4cc-ac63-40af-8a3a-2933fb25ffc2" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503"
Feb 16 11:21:51 crc kubenswrapper[4949]: I0216 11:21:51.514592 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-94t82"
Feb 16 11:21:51 crc kubenswrapper[4949]: I0216 11:21:51.515220 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-94t82"
Feb 16 11:21:51 crc kubenswrapper[4949]: I0216 11:21:51.584187 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-94t82"
Feb 16 11:21:52 crc kubenswrapper[4949]: I0216 11:21:52.316656 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-94t82"
Feb 16 11:21:52 crc kubenswrapper[4949]: I0216 11:21:52.380631 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-94t82"]
Feb 16 11:21:54 crc kubenswrapper[4949]: I0216 11:21:54.283394 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-94t82" podUID="850c4c53-3a80-462d-8e41-8c57dc474d00" containerName="registry-server" containerID="cri-o://9a59ae0b2c58d2d4552305b3786788c16929a4457495d54852cc54be202fec50" gracePeriod=2
Feb 16 11:21:54 crc kubenswrapper[4949]: I0216 11:21:54.757309 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-94t82"
Feb 16 11:21:54 crc kubenswrapper[4949]: I0216 11:21:54.917139 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/850c4c53-3a80-462d-8e41-8c57dc474d00-catalog-content\") pod \"850c4c53-3a80-462d-8e41-8c57dc474d00\" (UID: \"850c4c53-3a80-462d-8e41-8c57dc474d00\") "
Feb 16 11:21:54 crc kubenswrapper[4949]: I0216 11:21:54.917222 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/850c4c53-3a80-462d-8e41-8c57dc474d00-utilities\") pod \"850c4c53-3a80-462d-8e41-8c57dc474d00\" (UID: \"850c4c53-3a80-462d-8e41-8c57dc474d00\") "
Feb 16 11:21:54 crc kubenswrapper[4949]: I0216 11:21:54.917292 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnb5f\" (UniqueName: \"kubernetes.io/projected/850c4c53-3a80-462d-8e41-8c57dc474d00-kube-api-access-rnb5f\") pod \"850c4c53-3a80-462d-8e41-8c57dc474d00\" (UID: \"850c4c53-3a80-462d-8e41-8c57dc474d00\") "
Feb 16 11:21:54 crc kubenswrapper[4949]: I0216 11:21:54.918690 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/850c4c53-3a80-462d-8e41-8c57dc474d00-utilities" (OuterVolumeSpecName: "utilities") pod "850c4c53-3a80-462d-8e41-8c57dc474d00" (UID: "850c4c53-3a80-462d-8e41-8c57dc474d00"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Feb 16 11:21:54 crc kubenswrapper[4949]: I0216 11:21:54.936496 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/850c4c53-3a80-462d-8e41-8c57dc474d00-kube-api-access-rnb5f" (OuterVolumeSpecName: "kube-api-access-rnb5f") pod "850c4c53-3a80-462d-8e41-8c57dc474d00" (UID: "850c4c53-3a80-462d-8e41-8c57dc474d00"). InnerVolumeSpecName "kube-api-access-rnb5f". PluginName "kubernetes.io/projected", VolumeGidValue ""
Feb 16 11:21:54 crc kubenswrapper[4949]: I0216 11:21:54.951491 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/850c4c53-3a80-462d-8e41-8c57dc474d00-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "850c4c53-3a80-462d-8e41-8c57dc474d00" (UID: "850c4c53-3a80-462d-8e41-8c57dc474d00"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Feb 16 11:21:55 crc kubenswrapper[4949]: I0216 11:21:55.090473 4949 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/850c4c53-3a80-462d-8e41-8c57dc474d00-catalog-content\") on node \"crc\" DevicePath \"\""
Feb 16 11:21:55 crc kubenswrapper[4949]: I0216 11:21:55.090522 4949 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/850c4c53-3a80-462d-8e41-8c57dc474d00-utilities\") on node \"crc\" DevicePath \"\""
Feb 16 11:21:55 crc kubenswrapper[4949]: I0216 11:21:55.090536 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnb5f\" (UniqueName: \"kubernetes.io/projected/850c4c53-3a80-462d-8e41-8c57dc474d00-kube-api-access-rnb5f\") on node \"crc\" DevicePath \"\""
Feb 16 11:21:55 crc kubenswrapper[4949]: I0216 11:21:55.295638 4949 generic.go:334] "Generic (PLEG): container finished" podID="850c4c53-3a80-462d-8e41-8c57dc474d00" containerID="9a59ae0b2c58d2d4552305b3786788c16929a4457495d54852cc54be202fec50" exitCode=0
Feb 16 11:21:55 crc kubenswrapper[4949]: I0216 11:21:55.295729 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-94t82" event={"ID":"850c4c53-3a80-462d-8e41-8c57dc474d00","Type":"ContainerDied","Data":"9a59ae0b2c58d2d4552305b3786788c16929a4457495d54852cc54be202fec50"}
Feb 16 11:21:55 crc kubenswrapper[4949]: I0216 11:21:55.295824 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-94t82" event={"ID":"850c4c53-3a80-462d-8e41-8c57dc474d00","Type":"ContainerDied","Data":"10d13a5e404ce92f73e825cdec7ebc89946e69f8b787afec466242623b2a0a82"}
Feb 16 11:21:55 crc kubenswrapper[4949]: I0216 11:21:55.295874 4949 scope.go:117] "RemoveContainer" containerID="9a59ae0b2c58d2d4552305b3786788c16929a4457495d54852cc54be202fec50"
Feb 16 11:21:55 crc kubenswrapper[4949]: I0216 11:21:55.295743 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-94t82"
Feb 16 11:21:55 crc kubenswrapper[4949]: I0216 11:21:55.326441 4949 scope.go:117] "RemoveContainer" containerID="9dc1dfb62528c898700392ea90487f0d535acbd0c45a2bf719b7cd7f3239341b"
Feb 16 11:21:55 crc kubenswrapper[4949]: I0216 11:21:55.326815 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-94t82"]
Feb 16 11:21:55 crc kubenswrapper[4949]: I0216 11:21:55.333326 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-94t82"]
Feb 16 11:21:55 crc kubenswrapper[4949]: I0216 11:21:55.355661 4949 scope.go:117] "RemoveContainer" containerID="e372d1ca959b7638eba21314ba46dc95b7a1cb9c3b00a593a01531770cdab45f"
Feb 16 11:21:55 crc kubenswrapper[4949]: I0216 11:21:55.375640 4949 scope.go:117] "RemoveContainer" containerID="9a59ae0b2c58d2d4552305b3786788c16929a4457495d54852cc54be202fec50"
Feb 16 11:21:55 crc kubenswrapper[4949]: E0216 11:21:55.376210 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9a59ae0b2c58d2d4552305b3786788c16929a4457495d54852cc54be202fec50\": container with ID starting with 9a59ae0b2c58d2d4552305b3786788c16929a4457495d54852cc54be202fec50 not found: ID does not exist" containerID="9a59ae0b2c58d2d4552305b3786788c16929a4457495d54852cc54be202fec50"
Feb 16 11:21:55 crc kubenswrapper[4949]: I0216 11:21:55.376279 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9a59ae0b2c58d2d4552305b3786788c16929a4457495d54852cc54be202fec50"} err="failed to get container status \"9a59ae0b2c58d2d4552305b3786788c16929a4457495d54852cc54be202fec50\": rpc error: code = NotFound desc = could not find container \"9a59ae0b2c58d2d4552305b3786788c16929a4457495d54852cc54be202fec50\": container with ID starting with 9a59ae0b2c58d2d4552305b3786788c16929a4457495d54852cc54be202fec50 not found: ID does not exist"
Feb 16 11:21:55 crc kubenswrapper[4949]: I0216 11:21:55.376326 4949 scope.go:117] "RemoveContainer" containerID="9dc1dfb62528c898700392ea90487f0d535acbd0c45a2bf719b7cd7f3239341b"
Feb 16 11:21:55 crc kubenswrapper[4949]: E0216 11:21:55.376710 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9dc1dfb62528c898700392ea90487f0d535acbd0c45a2bf719b7cd7f3239341b\": container with ID starting with 9dc1dfb62528c898700392ea90487f0d535acbd0c45a2bf719b7cd7f3239341b not found: ID does not exist" containerID="9dc1dfb62528c898700392ea90487f0d535acbd0c45a2bf719b7cd7f3239341b"
Feb 16 11:21:55 crc kubenswrapper[4949]: I0216 11:21:55.376753 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9dc1dfb62528c898700392ea90487f0d535acbd0c45a2bf719b7cd7f3239341b"} err="failed to get container status \"9dc1dfb62528c898700392ea90487f0d535acbd0c45a2bf719b7cd7f3239341b\": rpc error: code = NotFound desc = could not find container \"9dc1dfb62528c898700392ea90487f0d535acbd0c45a2bf719b7cd7f3239341b\": container with ID starting with 9dc1dfb62528c898700392ea90487f0d535acbd0c45a2bf719b7cd7f3239341b not found: ID does not exist"
Feb 16 11:21:55 crc kubenswrapper[4949]: I0216 11:21:55.376789 4949 scope.go:117] "RemoveContainer" containerID="e372d1ca959b7638eba21314ba46dc95b7a1cb9c3b00a593a01531770cdab45f"
Feb 16 11:21:55 crc kubenswrapper[4949]: E0216 11:21:55.377068 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e372d1ca959b7638eba21314ba46dc95b7a1cb9c3b00a593a01531770cdab45f\": container with ID starting with e372d1ca959b7638eba21314ba46dc95b7a1cb9c3b00a593a01531770cdab45f not found: ID does not exist" containerID="e372d1ca959b7638eba21314ba46dc95b7a1cb9c3b00a593a01531770cdab45f"
Feb 16 11:21:55 crc kubenswrapper[4949]: I0216 11:21:55.377107 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e372d1ca959b7638eba21314ba46dc95b7a1cb9c3b00a593a01531770cdab45f"} err="failed to get container status \"e372d1ca959b7638eba21314ba46dc95b7a1cb9c3b00a593a01531770cdab45f\": rpc error: code = NotFound desc = could not find container \"e372d1ca959b7638eba21314ba46dc95b7a1cb9c3b00a593a01531770cdab45f\": container with ID starting with e372d1ca959b7638eba21314ba46dc95b7a1cb9c3b00a593a01531770cdab45f not found: ID does not exist"
Feb 16 11:21:57 crc kubenswrapper[4949]: I0216 11:21:57.245581 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="850c4c53-3a80-462d-8e41-8c57dc474d00" path="/var/lib/kubelet/pods/850c4c53-3a80-462d-8e41-8c57dc474d00/volumes"
Feb 16 11:21:59 crc kubenswrapper[4949]: I0216 11:21:59.350425 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-ingester-0"
Feb 16 11:22:15 crc kubenswrapper[4949]: I0216 11:22:15.888030 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/collector-9p5gd"]
Feb 16 11:22:15 crc kubenswrapper[4949]: E0216 11:22:15.888904 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="850c4c53-3a80-462d-8e41-8c57dc474d00" containerName="extract-content"
Feb 16 11:22:15 crc kubenswrapper[4949]: I0216 11:22:15.888920 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="850c4c53-3a80-462d-8e41-8c57dc474d00" containerName="extract-content"
Feb 16 11:22:15 crc kubenswrapper[4949]: E0216 11:22:15.888930 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3431163c-eca9-47ca-93d2-779262d76774" containerName="registry-server"
Feb 16 11:22:15 crc kubenswrapper[4949]: I0216 11:22:15.888938 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="3431163c-eca9-47ca-93d2-779262d76774" containerName="registry-server"
Feb 16 11:22:15 crc kubenswrapper[4949]: E0216 11:22:15.888959 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3431163c-eca9-47ca-93d2-779262d76774" containerName="extract-utilities"
Feb 16 11:22:15 crc kubenswrapper[4949]: I0216 11:22:15.888968 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="3431163c-eca9-47ca-93d2-779262d76774" containerName="extract-utilities"
Feb 16 11:22:15 crc kubenswrapper[4949]: E0216 11:22:15.888983 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a98e119-721e-4977-a450-a6b9d6b1e431" containerName="registry-server"
Feb 16 11:22:15 crc kubenswrapper[4949]: I0216 11:22:15.888990 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a98e119-721e-4977-a450-a6b9d6b1e431" containerName="registry-server"
Feb 16 11:22:15 crc kubenswrapper[4949]: E0216 11:22:15.889002 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="850c4c53-3a80-462d-8e41-8c57dc474d00" containerName="registry-server"
Feb 16 11:22:15 crc kubenswrapper[4949]: I0216 11:22:15.889008 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="850c4c53-3a80-462d-8e41-8c57dc474d00" containerName="registry-server"
Feb 16 11:22:15 crc kubenswrapper[4949]: E0216 11:22:15.889020 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a98e119-721e-4977-a450-a6b9d6b1e431" containerName="extract-utilities"
Feb 16 11:22:15 crc kubenswrapper[4949]: I0216 11:22:15.889027 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a98e119-721e-4977-a450-a6b9d6b1e431" containerName="extract-utilities"
Feb 16 11:22:15 crc kubenswrapper[4949]: E0216 11:22:15.889041 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a98e119-721e-4977-a450-a6b9d6b1e431" containerName="extract-content"
Feb 16 11:22:15 crc kubenswrapper[4949]: I0216 11:22:15.889049 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a98e119-721e-4977-a450-a6b9d6b1e431" containerName="extract-content"
Feb 16 11:22:15 crc kubenswrapper[4949]: E0216 11:22:15.889058 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="850c4c53-3a80-462d-8e41-8c57dc474d00" containerName="extract-utilities"
Feb 16 11:22:15 crc kubenswrapper[4949]: I0216 11:22:15.889064 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="850c4c53-3a80-462d-8e41-8c57dc474d00" containerName="extract-utilities"
Feb 16 11:22:15 crc kubenswrapper[4949]: E0216 11:22:15.889073 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3431163c-eca9-47ca-93d2-779262d76774" containerName="extract-content"
Feb 16 11:22:15 crc kubenswrapper[4949]: I0216 11:22:15.889080 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="3431163c-eca9-47ca-93d2-779262d76774" containerName="extract-content"
Feb 16 11:22:15 crc kubenswrapper[4949]: I0216 11:22:15.889279 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="3431163c-eca9-47ca-93d2-779262d76774" containerName="registry-server"
Feb 16 11:22:15 crc kubenswrapper[4949]: I0216 11:22:15.889302 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a98e119-721e-4977-a450-a6b9d6b1e431" containerName="registry-server"
Feb 16 11:22:15 crc kubenswrapper[4949]: I0216 11:22:15.889317 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="850c4c53-3a80-462d-8e41-8c57dc474d00" containerName="registry-server"
Feb 16 11:22:15 crc kubenswrapper[4949]: I0216 11:22:15.890038 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-9p5gd"
Feb 16 11:22:15 crc kubenswrapper[4949]: I0216 11:22:15.892689 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-syslog-receiver"
Feb 16 11:22:15 crc kubenswrapper[4949]: I0216 11:22:15.892717 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-config"
Feb 16 11:22:15 crc kubenswrapper[4949]: I0216 11:22:15.892697 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-token"
Feb 16 11:22:15 crc kubenswrapper[4949]: I0216 11:22:15.893130 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-dockercfg-t5d66"
Feb 16 11:22:15 crc kubenswrapper[4949]: I0216 11:22:15.893262 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-metrics"
Feb 16 11:22:15 crc kubenswrapper[4949]: I0216 11:22:15.913317 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/collector-9p5gd"]
Feb 16 11:22:15 crc kubenswrapper[4949]: I0216 11:22:15.922619 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-trustbundle"
Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.036869 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-config\") pod \"collector-9p5gd\" (UID: \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\") " pod="openshift-logging/collector-9p5gd"
Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.036932 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-collector-token\") pod \"collector-9p5gd\" (UID: \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\") " pod="openshift-logging/collector-9p5gd"
Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.036986 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-collector-syslog-receiver\") pod \"collector-9p5gd\" (UID: \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\") " pod="openshift-logging/collector-9p5gd"
Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.037025 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-entrypoint\") pod \"collector-9p5gd\" (UID: \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\") " pod="openshift-logging/collector-9p5gd"
Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.037050 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hcrgs\" (UniqueName: \"kubernetes.io/projected/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-kube-api-access-hcrgs\") pod \"collector-9p5gd\" (UID: \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\") " pod="openshift-logging/collector-9p5gd"
Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.037082 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-sa-token\") pod \"collector-9p5gd\" (UID: \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\") " pod="openshift-logging/collector-9p5gd"
Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.037102 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-config-openshift-service-cacrt\") pod \"collector-9p5gd\" (UID: \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\") " pod="openshift-logging/collector-9p5gd"
Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.037122 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-tmp\") pod \"collector-9p5gd\" (UID: \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\") " pod="openshift-logging/collector-9p5gd"
Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.037141 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-trusted-ca\") pod \"collector-9p5gd\" (UID: \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\") " pod="openshift-logging/collector-9p5gd"
Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.037161 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-metrics\") pod \"collector-9p5gd\" (UID: \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\") " pod="openshift-logging/collector-9p5gd"
Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.037193 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-datadir\") pod \"collector-9p5gd\" (UID: \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\") " pod="openshift-logging/collector-9p5gd"
Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.050014 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-logging/collector-9p5gd"]
Feb 16 11:22:16 crc kubenswrapper[4949]: E0216 11:22:16.050737 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[collector-syslog-receiver collector-token config config-openshift-service-cacrt datadir entrypoint kube-api-access-hcrgs metrics sa-token tmp trusted-ca], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openshift-logging/collector-9p5gd" podUID="ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059"
Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.139083 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-collector-syslog-receiver\") pod \"collector-9p5gd\" (UID: \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\") " pod="openshift-logging/collector-9p5gd"
Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.139139 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-entrypoint\") pod \"collector-9p5gd\" (UID: \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\") " pod="openshift-logging/collector-9p5gd"
Feb 16 11:22:16 crc kubenswrapper[4949]: E0216 11:22:16.139316 4949 secret.go:188] Couldn't get secret openshift-logging/collector-syslog-receiver: secret "collector-syslog-receiver" not found
Feb 16 11:22:16 crc kubenswrapper[4949]: E0216 11:22:16.139404 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-collector-syslog-receiver podName:ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059 nodeName:}" failed. No retries permitted until 2026-02-16 11:22:16.639384484 +0000 UTC m=+926.268718649 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "collector-syslog-receiver" (UniqueName: "kubernetes.io/secret/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-collector-syslog-receiver") pod "collector-9p5gd" (UID: "ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059") : secret "collector-syslog-receiver" not found
Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.139926 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-entrypoint\") pod \"collector-9p5gd\" (UID: \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\") " pod="openshift-logging/collector-9p5gd"
Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.140011 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hcrgs\" (UniqueName: \"kubernetes.io/projected/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-kube-api-access-hcrgs\") pod \"collector-9p5gd\" (UID: \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\") " pod="openshift-logging/collector-9p5gd"
Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.140363 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-sa-token\") pod \"collector-9p5gd\" (UID: \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\") " pod="openshift-logging/collector-9p5gd"
Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.141203 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-config-openshift-service-cacrt\") pod \"collector-9p5gd\" (UID: \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\") " pod="openshift-logging/collector-9p5gd"
Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.141320 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-tmp\") pod \"collector-9p5gd\" (UID: \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\") " pod="openshift-logging/collector-9p5gd"
Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.141415 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-trusted-ca\") pod \"collector-9p5gd\" (UID: \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\") " pod="openshift-logging/collector-9p5gd"
Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.141515 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-metrics\") pod \"collector-9p5gd\" (UID: \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\") " pod="openshift-logging/collector-9p5gd"
Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.141595 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-datadir\") pod \"collector-9p5gd\" (UID: \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\") " pod="openshift-logging/collector-9p5gd"
Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.141768 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-config\") pod \"collector-9p5gd\" (UID: \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\") " pod="openshift-logging/collector-9p5gd"
Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.142623 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-collector-token\") pod \"collector-9p5gd\" (UID: \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\") " pod="openshift-logging/collector-9p5gd"
Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.141641 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-config-openshift-service-cacrt\") pod \"collector-9p5gd\" (UID: \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\") " pod="openshift-logging/collector-9p5gd"
Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.142505 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-trusted-ca\") pod \"collector-9p5gd\" (UID: \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\") " pod="openshift-logging/collector-9p5gd"
Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.142535 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-config\") pod \"collector-9p5gd\" (UID: \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\") " pod="openshift-logging/collector-9p5gd"
Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.141670 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-datadir\") pod \"collector-9p5gd\" (UID: \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\") " pod="openshift-logging/collector-9p5gd"
Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.150189 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-tmp\") pod \"collector-9p5gd\" (UID: \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\") " pod="openshift-logging/collector-9p5gd"
Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.150499 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-metrics\") pod \"collector-9p5gd\" (UID: \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\") " pod="openshift-logging/collector-9p5gd"
Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.150692 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-collector-token\") pod \"collector-9p5gd\" (UID: \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\") " pod="openshift-logging/collector-9p5gd"
Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.162510 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hcrgs\" (UniqueName: \"kubernetes.io/projected/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-kube-api-access-hcrgs\") pod \"collector-9p5gd\" (UID: \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\") " pod="openshift-logging/collector-9p5gd"
Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.164626 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-sa-token\") pod \"collector-9p5gd\" (UID: \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\") " pod="openshift-logging/collector-9p5gd"
Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.458020 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-9p5gd"
Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.467561 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-9p5gd"
Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.547725 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hcrgs\" (UniqueName: \"kubernetes.io/projected/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-kube-api-access-hcrgs\") pod \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\" (UID: \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\") "
Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.547787 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-metrics\") pod \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\" (UID: \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\") "
Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.547820 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-collector-token\") pod \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\" (UID: \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\") "
Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.547883 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-trusted-ca\") pod \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\" (UID: \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\") "
Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.547927 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-tmp\") pod \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\" (UID: \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\") "
Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.547961 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-config-openshift-service-cacrt\") pod \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\" (UID: \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\") "
Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.547995 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-sa-token\") pod \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\" (UID: \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\") "
Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.548060 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"entrypoint\" (UniqueName:
\"kubernetes.io/configmap/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-entrypoint\") pod \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\" (UID: \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\") " Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.548085 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-datadir\") pod \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\" (UID: \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\") " Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.548105 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-config\") pod \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\" (UID: \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\") " Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.548905 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-config-openshift-service-cacrt" (OuterVolumeSpecName: "config-openshift-service-cacrt") pod "ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059" (UID: "ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059"). InnerVolumeSpecName "config-openshift-service-cacrt". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.548966 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-datadir" (OuterVolumeSpecName: "datadir") pod "ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059" (UID: "ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059"). InnerVolumeSpecName "datadir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.548943 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-config" (OuterVolumeSpecName: "config") pod "ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059" (UID: "ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.548957 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-entrypoint" (OuterVolumeSpecName: "entrypoint") pod "ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059" (UID: "ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059"). InnerVolumeSpecName "entrypoint". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.549592 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059" (UID: "ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.551534 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-sa-token" (OuterVolumeSpecName: "sa-token") pod "ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059" (UID: "ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059"). InnerVolumeSpecName "sa-token". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.552358 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-collector-token" (OuterVolumeSpecName: "collector-token") pod "ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059" (UID: "ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059"). InnerVolumeSpecName "collector-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.553351 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-metrics" (OuterVolumeSpecName: "metrics") pod "ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059" (UID: "ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059"). InnerVolumeSpecName "metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.553352 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-kube-api-access-hcrgs" (OuterVolumeSpecName: "kube-api-access-hcrgs") pod "ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059" (UID: "ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059"). InnerVolumeSpecName "kube-api-access-hcrgs". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.554447 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-tmp" (OuterVolumeSpecName: "tmp") pod "ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059" (UID: "ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059"). InnerVolumeSpecName "tmp". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.649891 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-collector-syslog-receiver\") pod \"collector-9p5gd\" (UID: \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\") " pod="openshift-logging/collector-9p5gd" Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.650264 4949 reconciler_common.go:293] "Volume detached for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-entrypoint\") on node \"crc\" DevicePath \"\"" Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.650333 4949 reconciler_common.go:293] "Volume detached for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-datadir\") on node \"crc\" DevicePath \"\"" Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.650434 4949 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.650502 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hcrgs\" (UniqueName: \"kubernetes.io/projected/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-kube-api-access-hcrgs\") on node \"crc\" DevicePath \"\"" Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.650560 4949 reconciler_common.go:293] "Volume detached for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-metrics\") on node \"crc\" DevicePath \"\"" Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.650618 4949 
reconciler_common.go:293] "Volume detached for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-collector-token\") on node \"crc\" DevicePath \"\"" Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.650678 4949 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-trusted-ca\") on node \"crc\" DevicePath \"\"" Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.650760 4949 reconciler_common.go:293] "Volume detached for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-tmp\") on node \"crc\" DevicePath \"\"" Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.650834 4949 reconciler_common.go:293] "Volume detached for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-config-openshift-service-cacrt\") on node \"crc\" DevicePath \"\"" Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.650933 4949 reconciler_common.go:293] "Volume detached for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-sa-token\") on node \"crc\" DevicePath \"\"" Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.652967 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-collector-syslog-receiver\") pod \"collector-9p5gd\" (UID: \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\") " pod="openshift-logging/collector-9p5gd" Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.751801 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-collector-syslog-receiver\") pod \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\" (UID: \"ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059\") " Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.754326 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-collector-syslog-receiver" (OuterVolumeSpecName: "collector-syslog-receiver") pod "ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059" (UID: "ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059"). InnerVolumeSpecName "collector-syslog-receiver". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:22:16 crc kubenswrapper[4949]: I0216 11:22:16.853923 4949 reconciler_common.go:293] "Volume detached for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059-collector-syslog-receiver\") on node \"crc\" DevicePath \"\"" Feb 16 11:22:17 crc kubenswrapper[4949]: I0216 11:22:17.465220 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-9p5gd" Feb 16 11:22:17 crc kubenswrapper[4949]: I0216 11:22:17.524145 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-logging/collector-9p5gd"] Feb 16 11:22:17 crc kubenswrapper[4949]: I0216 11:22:17.535087 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-logging/collector-9p5gd"] Feb 16 11:22:17 crc kubenswrapper[4949]: I0216 11:22:17.541514 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/collector-tv7fp"] Feb 16 11:22:17 crc kubenswrapper[4949]: I0216 11:22:17.542859 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/collector-tv7fp" Feb 16 11:22:17 crc kubenswrapper[4949]: I0216 11:22:17.546550 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-token" Feb 16 11:22:17 crc kubenswrapper[4949]: I0216 11:22:17.546628 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-config" Feb 16 11:22:17 crc kubenswrapper[4949]: I0216 11:22:17.546795 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-metrics" Feb 16 11:22:17 crc kubenswrapper[4949]: I0216 11:22:17.547043 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-dockercfg-t5d66" Feb 16 11:22:17 crc kubenswrapper[4949]: I0216 11:22:17.547193 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-syslog-receiver" Feb 16 11:22:17 crc kubenswrapper[4949]: I0216 11:22:17.547245 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/collector-tv7fp"] Feb 16 11:22:17 crc kubenswrapper[4949]: I0216 11:22:17.553685 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-trustbundle" Feb 16 11:22:17 crc kubenswrapper[4949]: I0216 11:22:17.668987 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/dfef0ca9-030f-4e8f-9804-ac2000c6bc75-tmp\") pod \"collector-tv7fp\" (UID: \"dfef0ca9-030f-4e8f-9804-ac2000c6bc75\") " pod="openshift-logging/collector-tv7fp" Feb 16 11:22:17 crc kubenswrapper[4949]: I0216 11:22:17.669036 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/dfef0ca9-030f-4e8f-9804-ac2000c6bc75-collector-token\") pod \"collector-tv7fp\" (UID: \"dfef0ca9-030f-4e8f-9804-ac2000c6bc75\") " pod="openshift-logging/collector-tv7fp" Feb 16 11:22:17 crc kubenswrapper[4949]: I0216 11:22:17.669069 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/dfef0ca9-030f-4e8f-9804-ac2000c6bc75-trusted-ca\") pod \"collector-tv7fp\" (UID: \"dfef0ca9-030f-4e8f-9804-ac2000c6bc75\") " pod="openshift-logging/collector-tv7fp" Feb 16 11:22:17 crc kubenswrapper[4949]: I0216 11:22:17.669122 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/dfef0ca9-030f-4e8f-9804-ac2000c6bc75-sa-token\") pod \"collector-tv7fp\" (UID: \"dfef0ca9-030f-4e8f-9804-ac2000c6bc75\") " pod="openshift-logging/collector-tv7fp" Feb 16 11:22:17 crc kubenswrapper[4949]: I0216 11:22:17.669184 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/dfef0ca9-030f-4e8f-9804-ac2000c6bc75-metrics\") pod \"collector-tv7fp\" (UID: \"dfef0ca9-030f-4e8f-9804-ac2000c6bc75\") " pod="openshift-logging/collector-tv7fp" Feb 16 11:22:17 crc kubenswrapper[4949]: I0216 11:22:17.669254 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/dfef0ca9-030f-4e8f-9804-ac2000c6bc75-entrypoint\") pod \"collector-tv7fp\" (UID: \"dfef0ca9-030f-4e8f-9804-ac2000c6bc75\") " pod="openshift-logging/collector-tv7fp" Feb 16 11:22:17 
crc kubenswrapper[4949]: I0216 11:22:17.669293 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6dwf6\" (UniqueName: \"kubernetes.io/projected/dfef0ca9-030f-4e8f-9804-ac2000c6bc75-kube-api-access-6dwf6\") pod \"collector-tv7fp\" (UID: \"dfef0ca9-030f-4e8f-9804-ac2000c6bc75\") " pod="openshift-logging/collector-tv7fp" Feb 16 11:22:17 crc kubenswrapper[4949]: I0216 11:22:17.669349 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dfef0ca9-030f-4e8f-9804-ac2000c6bc75-config\") pod \"collector-tv7fp\" (UID: \"dfef0ca9-030f-4e8f-9804-ac2000c6bc75\") " pod="openshift-logging/collector-tv7fp" Feb 16 11:22:17 crc kubenswrapper[4949]: I0216 11:22:17.669370 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/dfef0ca9-030f-4e8f-9804-ac2000c6bc75-datadir\") pod \"collector-tv7fp\" (UID: \"dfef0ca9-030f-4e8f-9804-ac2000c6bc75\") " pod="openshift-logging/collector-tv7fp" Feb 16 11:22:17 crc kubenswrapper[4949]: I0216 11:22:17.669396 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/dfef0ca9-030f-4e8f-9804-ac2000c6bc75-config-openshift-service-cacrt\") pod \"collector-tv7fp\" (UID: \"dfef0ca9-030f-4e8f-9804-ac2000c6bc75\") " pod="openshift-logging/collector-tv7fp" Feb 16 11:22:17 crc kubenswrapper[4949]: I0216 11:22:17.669427 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/dfef0ca9-030f-4e8f-9804-ac2000c6bc75-collector-syslog-receiver\") pod \"collector-tv7fp\" (UID: \"dfef0ca9-030f-4e8f-9804-ac2000c6bc75\") " pod="openshift-logging/collector-tv7fp" Feb 16 11:22:17 crc kubenswrapper[4949]: I0216 11:22:17.771491 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/dfef0ca9-030f-4e8f-9804-ac2000c6bc75-metrics\") pod \"collector-tv7fp\" (UID: \"dfef0ca9-030f-4e8f-9804-ac2000c6bc75\") " pod="openshift-logging/collector-tv7fp" Feb 16 11:22:17 crc kubenswrapper[4949]: I0216 11:22:17.772013 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/dfef0ca9-030f-4e8f-9804-ac2000c6bc75-entrypoint\") pod \"collector-tv7fp\" (UID: \"dfef0ca9-030f-4e8f-9804-ac2000c6bc75\") " pod="openshift-logging/collector-tv7fp" Feb 16 11:22:17 crc kubenswrapper[4949]: I0216 11:22:17.772044 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6dwf6\" (UniqueName: \"kubernetes.io/projected/dfef0ca9-030f-4e8f-9804-ac2000c6bc75-kube-api-access-6dwf6\") pod \"collector-tv7fp\" (UID: \"dfef0ca9-030f-4e8f-9804-ac2000c6bc75\") " pod="openshift-logging/collector-tv7fp" Feb 16 11:22:17 crc kubenswrapper[4949]: I0216 11:22:17.772086 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dfef0ca9-030f-4e8f-9804-ac2000c6bc75-config\") pod \"collector-tv7fp\" (UID: \"dfef0ca9-030f-4e8f-9804-ac2000c6bc75\") " pod="openshift-logging/collector-tv7fp" Feb 16 11:22:17 crc kubenswrapper[4949]: I0216 11:22:17.772105 4949 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/dfef0ca9-030f-4e8f-9804-ac2000c6bc75-datadir\") pod \"collector-tv7fp\" (UID: \"dfef0ca9-030f-4e8f-9804-ac2000c6bc75\") " pod="openshift-logging/collector-tv7fp" Feb 16 11:22:17 crc kubenswrapper[4949]: I0216 11:22:17.772123 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/dfef0ca9-030f-4e8f-9804-ac2000c6bc75-config-openshift-service-cacrt\") pod \"collector-tv7fp\" (UID: \"dfef0ca9-030f-4e8f-9804-ac2000c6bc75\") " pod="openshift-logging/collector-tv7fp" Feb 16 11:22:17 crc kubenswrapper[4949]: I0216 11:22:17.772151 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/dfef0ca9-030f-4e8f-9804-ac2000c6bc75-collector-syslog-receiver\") pod \"collector-tv7fp\" (UID: \"dfef0ca9-030f-4e8f-9804-ac2000c6bc75\") " pod="openshift-logging/collector-tv7fp" Feb 16 11:22:17 crc kubenswrapper[4949]: I0216 11:22:17.772189 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/dfef0ca9-030f-4e8f-9804-ac2000c6bc75-tmp\") pod \"collector-tv7fp\" (UID: \"dfef0ca9-030f-4e8f-9804-ac2000c6bc75\") " pod="openshift-logging/collector-tv7fp" Feb 16 11:22:17 crc kubenswrapper[4949]: I0216 11:22:17.772210 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/dfef0ca9-030f-4e8f-9804-ac2000c6bc75-collector-token\") pod \"collector-tv7fp\" (UID: \"dfef0ca9-030f-4e8f-9804-ac2000c6bc75\") " pod="openshift-logging/collector-tv7fp" Feb 16 11:22:17 crc kubenswrapper[4949]: I0216 11:22:17.772230 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/dfef0ca9-030f-4e8f-9804-ac2000c6bc75-trusted-ca\") pod \"collector-tv7fp\" (UID: \"dfef0ca9-030f-4e8f-9804-ac2000c6bc75\") " pod="openshift-logging/collector-tv7fp" Feb 16 11:22:17 crc kubenswrapper[4949]: I0216 11:22:17.772256 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/dfef0ca9-030f-4e8f-9804-ac2000c6bc75-sa-token\") pod \"collector-tv7fp\" (UID: \"dfef0ca9-030f-4e8f-9804-ac2000c6bc75\") " pod="openshift-logging/collector-tv7fp" Feb 16 11:22:17 crc kubenswrapper[4949]: I0216 11:22:17.772226 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/dfef0ca9-030f-4e8f-9804-ac2000c6bc75-datadir\") pod \"collector-tv7fp\" (UID: \"dfef0ca9-030f-4e8f-9804-ac2000c6bc75\") " pod="openshift-logging/collector-tv7fp" Feb 16 11:22:17 crc kubenswrapper[4949]: I0216 11:22:17.773046 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/dfef0ca9-030f-4e8f-9804-ac2000c6bc75-config-openshift-service-cacrt\") pod \"collector-tv7fp\" (UID: \"dfef0ca9-030f-4e8f-9804-ac2000c6bc75\") " pod="openshift-logging/collector-tv7fp" Feb 16 11:22:17 crc kubenswrapper[4949]: I0216 11:22:17.773225 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/dfef0ca9-030f-4e8f-9804-ac2000c6bc75-entrypoint\") pod \"collector-tv7fp\" (UID: \"dfef0ca9-030f-4e8f-9804-ac2000c6bc75\") " 
pod="openshift-logging/collector-tv7fp" Feb 16 11:22:17 crc kubenswrapper[4949]: I0216 11:22:17.773261 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dfef0ca9-030f-4e8f-9804-ac2000c6bc75-config\") pod \"collector-tv7fp\" (UID: \"dfef0ca9-030f-4e8f-9804-ac2000c6bc75\") " pod="openshift-logging/collector-tv7fp" Feb 16 11:22:17 crc kubenswrapper[4949]: I0216 11:22:17.773706 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/dfef0ca9-030f-4e8f-9804-ac2000c6bc75-trusted-ca\") pod \"collector-tv7fp\" (UID: \"dfef0ca9-030f-4e8f-9804-ac2000c6bc75\") " pod="openshift-logging/collector-tv7fp" Feb 16 11:22:17 crc kubenswrapper[4949]: I0216 11:22:17.776426 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/dfef0ca9-030f-4e8f-9804-ac2000c6bc75-collector-syslog-receiver\") pod \"collector-tv7fp\" (UID: \"dfef0ca9-030f-4e8f-9804-ac2000c6bc75\") " pod="openshift-logging/collector-tv7fp" Feb 16 11:22:17 crc kubenswrapper[4949]: I0216 11:22:17.776618 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/dfef0ca9-030f-4e8f-9804-ac2000c6bc75-tmp\") pod \"collector-tv7fp\" (UID: \"dfef0ca9-030f-4e8f-9804-ac2000c6bc75\") " pod="openshift-logging/collector-tv7fp" Feb 16 11:22:17 crc kubenswrapper[4949]: I0216 11:22:17.776619 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/dfef0ca9-030f-4e8f-9804-ac2000c6bc75-collector-token\") pod \"collector-tv7fp\" (UID: \"dfef0ca9-030f-4e8f-9804-ac2000c6bc75\") " pod="openshift-logging/collector-tv7fp" Feb 16 11:22:17 crc kubenswrapper[4949]: I0216 11:22:17.778094 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/dfef0ca9-030f-4e8f-9804-ac2000c6bc75-metrics\") pod \"collector-tv7fp\" (UID: \"dfef0ca9-030f-4e8f-9804-ac2000c6bc75\") " pod="openshift-logging/collector-tv7fp" Feb 16 11:22:17 crc kubenswrapper[4949]: I0216 11:22:17.795734 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/dfef0ca9-030f-4e8f-9804-ac2000c6bc75-sa-token\") pod \"collector-tv7fp\" (UID: \"dfef0ca9-030f-4e8f-9804-ac2000c6bc75\") " pod="openshift-logging/collector-tv7fp" Feb 16 11:22:17 crc kubenswrapper[4949]: I0216 11:22:17.796536 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6dwf6\" (UniqueName: \"kubernetes.io/projected/dfef0ca9-030f-4e8f-9804-ac2000c6bc75-kube-api-access-6dwf6\") pod \"collector-tv7fp\" (UID: \"dfef0ca9-030f-4e8f-9804-ac2000c6bc75\") " pod="openshift-logging/collector-tv7fp" Feb 16 11:22:17 crc kubenswrapper[4949]: I0216 11:22:17.860975 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/collector-tv7fp" Feb 16 11:22:18 crc kubenswrapper[4949]: I0216 11:22:18.331876 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/collector-tv7fp"] Feb 16 11:22:18 crc kubenswrapper[4949]: I0216 11:22:18.472247 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/collector-tv7fp" event={"ID":"dfef0ca9-030f-4e8f-9804-ac2000c6bc75","Type":"ContainerStarted","Data":"711ab5c590687193f9a5757b6dc1b22a95f1d9467a83a7ce8ee85e74e6fdc639"} Feb 16 11:22:19 crc kubenswrapper[4949]: I0216 11:22:19.244306 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059" path="/var/lib/kubelet/pods/ae85a7ca-8fff-4f3f-afbb-ba7cde6cf059/volumes" Feb 16 11:22:26 crc kubenswrapper[4949]: I0216 11:22:26.540756 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/collector-tv7fp" event={"ID":"dfef0ca9-030f-4e8f-9804-ac2000c6bc75","Type":"ContainerStarted","Data":"2ced45334e117c2d37dd2e99ea858948ef222e4fe17c80b85163d9cfcdb6dc8f"} Feb 16 11:22:26 crc kubenswrapper[4949]: I0216 11:22:26.566145 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/collector-tv7fp" podStartSLOduration=2.449829825 podStartE2EDuration="9.565926456s" podCreationTimestamp="2026-02-16 11:22:17 +0000 UTC" firstStartedPulling="2026-02-16 11:22:18.34032047 +0000 UTC m=+927.969654635" lastFinishedPulling="2026-02-16 11:22:25.456417101 +0000 UTC m=+935.085751266" observedRunningTime="2026-02-16 11:22:26.558845195 +0000 UTC m=+936.188179360" watchObservedRunningTime="2026-02-16 11:22:26.565926456 +0000 UTC m=+936.195260621" Feb 16 11:22:55 crc kubenswrapper[4949]: I0216 11:22:55.839772 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecakpkd4"] Feb 16 11:22:55 crc kubenswrapper[4949]: I0216 11:22:55.842608 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecakpkd4" Feb 16 11:22:55 crc kubenswrapper[4949]: I0216 11:22:55.844964 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Feb 16 11:22:55 crc kubenswrapper[4949]: I0216 11:22:55.853066 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecakpkd4"] Feb 16 11:22:56 crc kubenswrapper[4949]: I0216 11:22:56.020318 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gzrvl\" (UniqueName: \"kubernetes.io/projected/1d26d747-8606-4803-a80f-fe1b8ae10e24-kube-api-access-gzrvl\") pod \"f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecakpkd4\" (UID: \"1d26d747-8606-4803-a80f-fe1b8ae10e24\") " pod="openshift-marketplace/f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecakpkd4" Feb 16 11:22:56 crc kubenswrapper[4949]: I0216 11:22:56.020468 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/1d26d747-8606-4803-a80f-fe1b8ae10e24-util\") pod \"f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecakpkd4\" (UID: \"1d26d747-8606-4803-a80f-fe1b8ae10e24\") " pod="openshift-marketplace/f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecakpkd4" Feb 16 11:22:56 crc kubenswrapper[4949]: I0216 11:22:56.020512 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/1d26d747-8606-4803-a80f-fe1b8ae10e24-bundle\") pod \"f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecakpkd4\" (UID: \"1d26d747-8606-4803-a80f-fe1b8ae10e24\") " pod="openshift-marketplace/f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecakpkd4" Feb 16 11:22:56 crc kubenswrapper[4949]: I0216 11:22:56.122599 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gzrvl\" (UniqueName: \"kubernetes.io/projected/1d26d747-8606-4803-a80f-fe1b8ae10e24-kube-api-access-gzrvl\") pod \"f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecakpkd4\" (UID: \"1d26d747-8606-4803-a80f-fe1b8ae10e24\") " pod="openshift-marketplace/f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecakpkd4" Feb 16 11:22:56 crc kubenswrapper[4949]: I0216 11:22:56.122657 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/1d26d747-8606-4803-a80f-fe1b8ae10e24-util\") pod \"f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecakpkd4\" (UID: \"1d26d747-8606-4803-a80f-fe1b8ae10e24\") " pod="openshift-marketplace/f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecakpkd4" Feb 16 11:22:56 crc kubenswrapper[4949]: I0216 11:22:56.122679 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/1d26d747-8606-4803-a80f-fe1b8ae10e24-bundle\") pod \"f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecakpkd4\" (UID: \"1d26d747-8606-4803-a80f-fe1b8ae10e24\") " pod="openshift-marketplace/f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecakpkd4" Feb 16 11:22:56 crc kubenswrapper[4949]: I0216 11:22:56.123279 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/1d26d747-8606-4803-a80f-fe1b8ae10e24-bundle\") pod \"f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecakpkd4\" (UID: \"1d26d747-8606-4803-a80f-fe1b8ae10e24\") " pod="openshift-marketplace/f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecakpkd4" Feb 16 11:22:56 crc kubenswrapper[4949]: I0216 11:22:56.123457 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/1d26d747-8606-4803-a80f-fe1b8ae10e24-util\") pod \"f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecakpkd4\" (UID: \"1d26d747-8606-4803-a80f-fe1b8ae10e24\") " pod="openshift-marketplace/f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecakpkd4" Feb 16 11:22:56 crc kubenswrapper[4949]: I0216 11:22:56.143136 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gzrvl\" (UniqueName: \"kubernetes.io/projected/1d26d747-8606-4803-a80f-fe1b8ae10e24-kube-api-access-gzrvl\") pod \"f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecakpkd4\" (UID: \"1d26d747-8606-4803-a80f-fe1b8ae10e24\") " pod="openshift-marketplace/f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecakpkd4" Feb 16 11:22:56 crc kubenswrapper[4949]: I0216 11:22:56.159366 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecakpkd4" Feb 16 11:22:57 crc kubenswrapper[4949]: I0216 11:22:57.013585 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecakpkd4"] Feb 16 11:22:57 crc kubenswrapper[4949]: I0216 11:22:57.782000 4949 generic.go:334] "Generic (PLEG): container finished" podID="1d26d747-8606-4803-a80f-fe1b8ae10e24" containerID="3fcfe60da66bfc937e56ed0ffba9849463fc4fa725bea76a66c55b68ae5da81f" exitCode=0 Feb 16 11:22:57 crc kubenswrapper[4949]: I0216 11:22:57.782074 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecakpkd4" event={"ID":"1d26d747-8606-4803-a80f-fe1b8ae10e24","Type":"ContainerDied","Data":"3fcfe60da66bfc937e56ed0ffba9849463fc4fa725bea76a66c55b68ae5da81f"} Feb 16 11:22:57 crc kubenswrapper[4949]: I0216 11:22:57.782534 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecakpkd4" event={"ID":"1d26d747-8606-4803-a80f-fe1b8ae10e24","Type":"ContainerStarted","Data":"92fea600d8988fae488ef6199d9a1b0db35b07ee70580ed7b408282fbd0b3530"} Feb 16 11:22:59 crc kubenswrapper[4949]: I0216 11:22:59.801000 4949 generic.go:334] "Generic (PLEG): container finished" podID="1d26d747-8606-4803-a80f-fe1b8ae10e24" containerID="38dd8d2d466851b4d08d2da65ee3dcea4caa222ca79e40859070e9637868ec10" exitCode=0 Feb 16 11:22:59 crc kubenswrapper[4949]: I0216 11:22:59.804067 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecakpkd4" event={"ID":"1d26d747-8606-4803-a80f-fe1b8ae10e24","Type":"ContainerDied","Data":"38dd8d2d466851b4d08d2da65ee3dcea4caa222ca79e40859070e9637868ec10"} Feb 16 11:23:00 crc kubenswrapper[4949]: I0216 11:23:00.813791 4949 generic.go:334] "Generic (PLEG): container finished" podID="1d26d747-8606-4803-a80f-fe1b8ae10e24" containerID="0539b0eed53d249217ab9b895a9cece81ade08aa7e8f91a637c0beb7451d3871" exitCode=0 Feb 16 11:23:00 crc kubenswrapper[4949]: I0216 
11:23:00.813935 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecakpkd4" event={"ID":"1d26d747-8606-4803-a80f-fe1b8ae10e24","Type":"ContainerDied","Data":"0539b0eed53d249217ab9b895a9cece81ade08aa7e8f91a637c0beb7451d3871"} Feb 16 11:23:02 crc kubenswrapper[4949]: I0216 11:23:02.126976 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecakpkd4" Feb 16 11:23:02 crc kubenswrapper[4949]: I0216 11:23:02.167108 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gzrvl\" (UniqueName: \"kubernetes.io/projected/1d26d747-8606-4803-a80f-fe1b8ae10e24-kube-api-access-gzrvl\") pod \"1d26d747-8606-4803-a80f-fe1b8ae10e24\" (UID: \"1d26d747-8606-4803-a80f-fe1b8ae10e24\") " Feb 16 11:23:02 crc kubenswrapper[4949]: I0216 11:23:02.167420 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/1d26d747-8606-4803-a80f-fe1b8ae10e24-util\") pod \"1d26d747-8606-4803-a80f-fe1b8ae10e24\" (UID: \"1d26d747-8606-4803-a80f-fe1b8ae10e24\") " Feb 16 11:23:02 crc kubenswrapper[4949]: I0216 11:23:02.167481 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/1d26d747-8606-4803-a80f-fe1b8ae10e24-bundle\") pod \"1d26d747-8606-4803-a80f-fe1b8ae10e24\" (UID: \"1d26d747-8606-4803-a80f-fe1b8ae10e24\") " Feb 16 11:23:02 crc kubenswrapper[4949]: I0216 11:23:02.171948 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d26d747-8606-4803-a80f-fe1b8ae10e24-bundle" (OuterVolumeSpecName: "bundle") pod "1d26d747-8606-4803-a80f-fe1b8ae10e24" (UID: "1d26d747-8606-4803-a80f-fe1b8ae10e24"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:23:02 crc kubenswrapper[4949]: I0216 11:23:02.177694 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d26d747-8606-4803-a80f-fe1b8ae10e24-kube-api-access-gzrvl" (OuterVolumeSpecName: "kube-api-access-gzrvl") pod "1d26d747-8606-4803-a80f-fe1b8ae10e24" (UID: "1d26d747-8606-4803-a80f-fe1b8ae10e24"). InnerVolumeSpecName "kube-api-access-gzrvl". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:23:02 crc kubenswrapper[4949]: I0216 11:23:02.189312 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d26d747-8606-4803-a80f-fe1b8ae10e24-util" (OuterVolumeSpecName: "util") pod "1d26d747-8606-4803-a80f-fe1b8ae10e24" (UID: "1d26d747-8606-4803-a80f-fe1b8ae10e24"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:23:02 crc kubenswrapper[4949]: I0216 11:23:02.273556 4949 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/1d26d747-8606-4803-a80f-fe1b8ae10e24-util\") on node \"crc\" DevicePath \"\"" Feb 16 11:23:02 crc kubenswrapper[4949]: I0216 11:23:02.273598 4949 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/1d26d747-8606-4803-a80f-fe1b8ae10e24-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:23:02 crc kubenswrapper[4949]: I0216 11:23:02.273612 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gzrvl\" (UniqueName: \"kubernetes.io/projected/1d26d747-8606-4803-a80f-fe1b8ae10e24-kube-api-access-gzrvl\") on node \"crc\" DevicePath \"\"" Feb 16 11:23:02 crc kubenswrapper[4949]: I0216 11:23:02.835926 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecakpkd4" event={"ID":"1d26d747-8606-4803-a80f-fe1b8ae10e24","Type":"ContainerDied","Data":"92fea600d8988fae488ef6199d9a1b0db35b07ee70580ed7b408282fbd0b3530"} Feb 16 11:23:02 crc kubenswrapper[4949]: I0216 11:23:02.836036 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="92fea600d8988fae488ef6199d9a1b0db35b07ee70580ed7b408282fbd0b3530" Feb 16 11:23:02 crc kubenswrapper[4949]: I0216 11:23:02.836203 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecakpkd4" Feb 16 11:23:04 crc kubenswrapper[4949]: I0216 11:23:04.550574 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 16 11:23:04 crc kubenswrapper[4949]: I0216 11:23:04.551161 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 16 11:23:07 crc kubenswrapper[4949]: I0216 11:23:07.829134 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-694c9596b7-z2z4x"] Feb 16 11:23:07 crc kubenswrapper[4949]: E0216 11:23:07.829823 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d26d747-8606-4803-a80f-fe1b8ae10e24" containerName="util" Feb 16 11:23:07 crc kubenswrapper[4949]: I0216 11:23:07.829841 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d26d747-8606-4803-a80f-fe1b8ae10e24" containerName="util" Feb 16 11:23:07 crc kubenswrapper[4949]: E0216 11:23:07.829857 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d26d747-8606-4803-a80f-fe1b8ae10e24" containerName="pull" Feb 16 11:23:07 crc kubenswrapper[4949]: I0216 11:23:07.829865 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d26d747-8606-4803-a80f-fe1b8ae10e24" containerName="pull" Feb 16 11:23:07 crc kubenswrapper[4949]: E0216 11:23:07.829884 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d26d747-8606-4803-a80f-fe1b8ae10e24" containerName="extract" Feb 16 11:23:07 crc 
kubenswrapper[4949]: I0216 11:23:07.829892 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d26d747-8606-4803-a80f-fe1b8ae10e24" containerName="extract" Feb 16 11:23:07 crc kubenswrapper[4949]: I0216 11:23:07.830051 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="1d26d747-8606-4803-a80f-fe1b8ae10e24" containerName="extract" Feb 16 11:23:07 crc kubenswrapper[4949]: I0216 11:23:07.830784 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-694c9596b7-z2z4x" Feb 16 11:23:07 crc kubenswrapper[4949]: I0216 11:23:07.834732 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Feb 16 11:23:07 crc kubenswrapper[4949]: I0216 11:23:07.835267 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-fcbcd" Feb 16 11:23:07 crc kubenswrapper[4949]: I0216 11:23:07.835548 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Feb 16 11:23:07 crc kubenswrapper[4949]: I0216 11:23:07.856895 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-694c9596b7-z2z4x"] Feb 16 11:23:07 crc kubenswrapper[4949]: I0216 11:23:07.914717 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bxslv\" (UniqueName: \"kubernetes.io/projected/8dac3bfe-a7f1-41bf-8bdf-9ca17fa87d15-kube-api-access-bxslv\") pod \"nmstate-operator-694c9596b7-z2z4x\" (UID: \"8dac3bfe-a7f1-41bf-8bdf-9ca17fa87d15\") " pod="openshift-nmstate/nmstate-operator-694c9596b7-z2z4x" Feb 16 11:23:08 crc kubenswrapper[4949]: I0216 11:23:08.016871 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bxslv\" (UniqueName: \"kubernetes.io/projected/8dac3bfe-a7f1-41bf-8bdf-9ca17fa87d15-kube-api-access-bxslv\") pod \"nmstate-operator-694c9596b7-z2z4x\" (UID: \"8dac3bfe-a7f1-41bf-8bdf-9ca17fa87d15\") " pod="openshift-nmstate/nmstate-operator-694c9596b7-z2z4x" Feb 16 11:23:08 crc kubenswrapper[4949]: I0216 11:23:08.050076 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bxslv\" (UniqueName: \"kubernetes.io/projected/8dac3bfe-a7f1-41bf-8bdf-9ca17fa87d15-kube-api-access-bxslv\") pod \"nmstate-operator-694c9596b7-z2z4x\" (UID: \"8dac3bfe-a7f1-41bf-8bdf-9ca17fa87d15\") " pod="openshift-nmstate/nmstate-operator-694c9596b7-z2z4x" Feb 16 11:23:08 crc kubenswrapper[4949]: I0216 11:23:08.151660 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-694c9596b7-z2z4x" Feb 16 11:23:08 crc kubenswrapper[4949]: I0216 11:23:08.619663 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-694c9596b7-z2z4x"] Feb 16 11:23:08 crc kubenswrapper[4949]: I0216 11:23:08.889220 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-694c9596b7-z2z4x" event={"ID":"8dac3bfe-a7f1-41bf-8bdf-9ca17fa87d15","Type":"ContainerStarted","Data":"8313b241eef9b57f0389e36fd49782740e791338657cbb3f2b3083ba228b32f8"} Feb 16 11:23:10 crc kubenswrapper[4949]: I0216 11:23:10.917952 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-694c9596b7-z2z4x" event={"ID":"8dac3bfe-a7f1-41bf-8bdf-9ca17fa87d15","Type":"ContainerStarted","Data":"d3c0b1217c42b911f2dfc3fc6e9da1576ef8fd59b6e6e806716614dad83960c3"} Feb 16 11:23:10 crc kubenswrapper[4949]: I0216 11:23:10.951066 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-694c9596b7-z2z4x" podStartSLOduration=1.899723313 podStartE2EDuration="3.951029372s" podCreationTimestamp="2026-02-16 11:23:07 +0000 UTC" firstStartedPulling="2026-02-16 11:23:08.627051215 +0000 UTC m=+978.256385380" lastFinishedPulling="2026-02-16 11:23:10.678357244 +0000 UTC m=+980.307691439" observedRunningTime="2026-02-16 11:23:10.943391395 +0000 UTC m=+980.572725570" watchObservedRunningTime="2026-02-16 11:23:10.951029372 +0000 UTC m=+980.580363547" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.061328 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-58c85c668d-rnprv"] Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.063692 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-58c85c668d-rnprv" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.066276 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-d7t89" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.075284 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-58c85c668d-rnprv"] Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.092851 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-866bcb46dc-qbzzp"] Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.094314 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-866bcb46dc-qbzzp" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.099737 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.129628 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vlwkk\" (UniqueName: \"kubernetes.io/projected/dab83125-a691-419c-b901-8476bc8881d4-kube-api-access-vlwkk\") pod \"nmstate-metrics-58c85c668d-rnprv\" (UID: \"dab83125-a691-419c-b901-8476bc8881d4\") " pod="openshift-nmstate/nmstate-metrics-58c85c668d-rnprv" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.143248 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-xtxbl"] Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.144694 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-xtxbl" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.155312 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-866bcb46dc-qbzzp"] Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.231354 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tlghq\" (UniqueName: \"kubernetes.io/projected/0e02bbc5-1e5b-41da-b16b-c42a001af050-kube-api-access-tlghq\") pod \"nmstate-handler-xtxbl\" (UID: \"0e02bbc5-1e5b-41da-b16b-c42a001af050\") " pod="openshift-nmstate/nmstate-handler-xtxbl" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.231427 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/0e02bbc5-1e5b-41da-b16b-c42a001af050-nmstate-lock\") pod \"nmstate-handler-xtxbl\" (UID: \"0e02bbc5-1e5b-41da-b16b-c42a001af050\") " pod="openshift-nmstate/nmstate-handler-xtxbl" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.231467 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/50b488d3-6c66-4a7d-baaf-e7b7c30ca1f1-tls-key-pair\") pod \"nmstate-webhook-866bcb46dc-qbzzp\" (UID: \"50b488d3-6c66-4a7d-baaf-e7b7c30ca1f1\") " pod="openshift-nmstate/nmstate-webhook-866bcb46dc-qbzzp" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.231497 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/0e02bbc5-1e5b-41da-b16b-c42a001af050-dbus-socket\") pod \"nmstate-handler-xtxbl\" (UID: \"0e02bbc5-1e5b-41da-b16b-c42a001af050\") " pod="openshift-nmstate/nmstate-handler-xtxbl" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.231529 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vlwkk\" (UniqueName: \"kubernetes.io/projected/dab83125-a691-419c-b901-8476bc8881d4-kube-api-access-vlwkk\") pod \"nmstate-metrics-58c85c668d-rnprv\" (UID: \"dab83125-a691-419c-b901-8476bc8881d4\") " pod="openshift-nmstate/nmstate-metrics-58c85c668d-rnprv" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.231552 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ggckx\" (UniqueName: \"kubernetes.io/projected/50b488d3-6c66-4a7d-baaf-e7b7c30ca1f1-kube-api-access-ggckx\") pod \"nmstate-webhook-866bcb46dc-qbzzp\" (UID: \"50b488d3-6c66-4a7d-baaf-e7b7c30ca1f1\") " pod="openshift-nmstate/nmstate-webhook-866bcb46dc-qbzzp" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.231603 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/0e02bbc5-1e5b-41da-b16b-c42a001af050-ovs-socket\") pod \"nmstate-handler-xtxbl\" (UID: \"0e02bbc5-1e5b-41da-b16b-c42a001af050\") " pod="openshift-nmstate/nmstate-handler-xtxbl" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.246761 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5c78fc5d65-6zbjh"] Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.248299 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5c78fc5d65-6zbjh" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.252144 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-twjp6" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.252483 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.252547 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.263139 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vlwkk\" (UniqueName: \"kubernetes.io/projected/dab83125-a691-419c-b901-8476bc8881d4-kube-api-access-vlwkk\") pod \"nmstate-metrics-58c85c668d-rnprv\" (UID: \"dab83125-a691-419c-b901-8476bc8881d4\") " pod="openshift-nmstate/nmstate-metrics-58c85c668d-rnprv" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.266574 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5c78fc5d65-6zbjh"] Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.333185 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/50b488d3-6c66-4a7d-baaf-e7b7c30ca1f1-tls-key-pair\") pod \"nmstate-webhook-866bcb46dc-qbzzp\" (UID: \"50b488d3-6c66-4a7d-baaf-e7b7c30ca1f1\") " pod="openshift-nmstate/nmstate-webhook-866bcb46dc-qbzzp" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.333267 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/84824090-52b6-4a69-ad49-b441b666b14d-nginx-conf\") pod \"nmstate-console-plugin-5c78fc5d65-6zbjh\" (UID: \"84824090-52b6-4a69-ad49-b441b666b14d\") " pod="openshift-nmstate/nmstate-console-plugin-5c78fc5d65-6zbjh" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.333299 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/0e02bbc5-1e5b-41da-b16b-c42a001af050-dbus-socket\") pod \"nmstate-handler-xtxbl\" (UID: \"0e02bbc5-1e5b-41da-b16b-c42a001af050\") " pod="openshift-nmstate/nmstate-handler-xtxbl" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.333339 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ggckx\" (UniqueName: \"kubernetes.io/projected/50b488d3-6c66-4a7d-baaf-e7b7c30ca1f1-kube-api-access-ggckx\") pod \"nmstate-webhook-866bcb46dc-qbzzp\" (UID: \"50b488d3-6c66-4a7d-baaf-e7b7c30ca1f1\") " pod="openshift-nmstate/nmstate-webhook-866bcb46dc-qbzzp" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.333405 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/0e02bbc5-1e5b-41da-b16b-c42a001af050-ovs-socket\") pod \"nmstate-handler-xtxbl\" (UID: \"0e02bbc5-1e5b-41da-b16b-c42a001af050\") " pod="openshift-nmstate/nmstate-handler-xtxbl" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.333463 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/84824090-52b6-4a69-ad49-b441b666b14d-plugin-serving-cert\") pod \"nmstate-console-plugin-5c78fc5d65-6zbjh\" (UID: 
\"84824090-52b6-4a69-ad49-b441b666b14d\") " pod="openshift-nmstate/nmstate-console-plugin-5c78fc5d65-6zbjh" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.333500 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2dvp5\" (UniqueName: \"kubernetes.io/projected/84824090-52b6-4a69-ad49-b441b666b14d-kube-api-access-2dvp5\") pod \"nmstate-console-plugin-5c78fc5d65-6zbjh\" (UID: \"84824090-52b6-4a69-ad49-b441b666b14d\") " pod="openshift-nmstate/nmstate-console-plugin-5c78fc5d65-6zbjh" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.333546 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tlghq\" (UniqueName: \"kubernetes.io/projected/0e02bbc5-1e5b-41da-b16b-c42a001af050-kube-api-access-tlghq\") pod \"nmstate-handler-xtxbl\" (UID: \"0e02bbc5-1e5b-41da-b16b-c42a001af050\") " pod="openshift-nmstate/nmstate-handler-xtxbl" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.333588 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/0e02bbc5-1e5b-41da-b16b-c42a001af050-nmstate-lock\") pod \"nmstate-handler-xtxbl\" (UID: \"0e02bbc5-1e5b-41da-b16b-c42a001af050\") " pod="openshift-nmstate/nmstate-handler-xtxbl" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.333820 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/0e02bbc5-1e5b-41da-b16b-c42a001af050-dbus-socket\") pod \"nmstate-handler-xtxbl\" (UID: \"0e02bbc5-1e5b-41da-b16b-c42a001af050\") " pod="openshift-nmstate/nmstate-handler-xtxbl" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.335423 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/0e02bbc5-1e5b-41da-b16b-c42a001af050-nmstate-lock\") pod \"nmstate-handler-xtxbl\" (UID: \"0e02bbc5-1e5b-41da-b16b-c42a001af050\") " pod="openshift-nmstate/nmstate-handler-xtxbl" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.335474 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/0e02bbc5-1e5b-41da-b16b-c42a001af050-ovs-socket\") pod \"nmstate-handler-xtxbl\" (UID: \"0e02bbc5-1e5b-41da-b16b-c42a001af050\") " pod="openshift-nmstate/nmstate-handler-xtxbl" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.348266 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/50b488d3-6c66-4a7d-baaf-e7b7c30ca1f1-tls-key-pair\") pod \"nmstate-webhook-866bcb46dc-qbzzp\" (UID: \"50b488d3-6c66-4a7d-baaf-e7b7c30ca1f1\") " pod="openshift-nmstate/nmstate-webhook-866bcb46dc-qbzzp" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.359696 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ggckx\" (UniqueName: \"kubernetes.io/projected/50b488d3-6c66-4a7d-baaf-e7b7c30ca1f1-kube-api-access-ggckx\") pod \"nmstate-webhook-866bcb46dc-qbzzp\" (UID: \"50b488d3-6c66-4a7d-baaf-e7b7c30ca1f1\") " pod="openshift-nmstate/nmstate-webhook-866bcb46dc-qbzzp" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.360079 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tlghq\" (UniqueName: \"kubernetes.io/projected/0e02bbc5-1e5b-41da-b16b-c42a001af050-kube-api-access-tlghq\") pod \"nmstate-handler-xtxbl\" (UID: 
\"0e02bbc5-1e5b-41da-b16b-c42a001af050\") " pod="openshift-nmstate/nmstate-handler-xtxbl" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.392252 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-58c85c668d-rnprv" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.422638 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-866bcb46dc-qbzzp" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.435883 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/84824090-52b6-4a69-ad49-b441b666b14d-nginx-conf\") pod \"nmstate-console-plugin-5c78fc5d65-6zbjh\" (UID: \"84824090-52b6-4a69-ad49-b441b666b14d\") " pod="openshift-nmstate/nmstate-console-plugin-5c78fc5d65-6zbjh" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.439202 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/84824090-52b6-4a69-ad49-b441b666b14d-plugin-serving-cert\") pod \"nmstate-console-plugin-5c78fc5d65-6zbjh\" (UID: \"84824090-52b6-4a69-ad49-b441b666b14d\") " pod="openshift-nmstate/nmstate-console-plugin-5c78fc5d65-6zbjh" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.439286 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2dvp5\" (UniqueName: \"kubernetes.io/projected/84824090-52b6-4a69-ad49-b441b666b14d-kube-api-access-2dvp5\") pod \"nmstate-console-plugin-5c78fc5d65-6zbjh\" (UID: \"84824090-52b6-4a69-ad49-b441b666b14d\") " pod="openshift-nmstate/nmstate-console-plugin-5c78fc5d65-6zbjh" Feb 16 11:23:17 crc kubenswrapper[4949]: E0216 11:23:17.439445 4949 secret.go:188] Couldn't get secret openshift-nmstate/plugin-serving-cert: secret "plugin-serving-cert" not found Feb 16 11:23:17 crc kubenswrapper[4949]: E0216 11:23:17.439558 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/84824090-52b6-4a69-ad49-b441b666b14d-plugin-serving-cert podName:84824090-52b6-4a69-ad49-b441b666b14d nodeName:}" failed. No retries permitted until 2026-02-16 11:23:17.939528256 +0000 UTC m=+987.568862421 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "plugin-serving-cert" (UniqueName: "kubernetes.io/secret/84824090-52b6-4a69-ad49-b441b666b14d-plugin-serving-cert") pod "nmstate-console-plugin-5c78fc5d65-6zbjh" (UID: "84824090-52b6-4a69-ad49-b441b666b14d") : secret "plugin-serving-cert" not found Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.439571 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/84824090-52b6-4a69-ad49-b441b666b14d-nginx-conf\") pod \"nmstate-console-plugin-5c78fc5d65-6zbjh\" (UID: \"84824090-52b6-4a69-ad49-b441b666b14d\") " pod="openshift-nmstate/nmstate-console-plugin-5c78fc5d65-6zbjh" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.463583 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2dvp5\" (UniqueName: \"kubernetes.io/projected/84824090-52b6-4a69-ad49-b441b666b14d-kube-api-access-2dvp5\") pod \"nmstate-console-plugin-5c78fc5d65-6zbjh\" (UID: \"84824090-52b6-4a69-ad49-b441b666b14d\") " pod="openshift-nmstate/nmstate-console-plugin-5c78fc5d65-6zbjh" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.466560 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-xtxbl" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.492627 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-757d775c7-shl7t"] Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.494578 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-757d775c7-shl7t" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.513226 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-757d775c7-shl7t"] Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.642810 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/c4ac199e-a5f1-4fc7-ad0f-ce907557928d-console-oauth-config\") pod \"console-757d775c7-shl7t\" (UID: \"c4ac199e-a5f1-4fc7-ad0f-ce907557928d\") " pod="openshift-console/console-757d775c7-shl7t" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.643571 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c4ac199e-a5f1-4fc7-ad0f-ce907557928d-trusted-ca-bundle\") pod \"console-757d775c7-shl7t\" (UID: \"c4ac199e-a5f1-4fc7-ad0f-ce907557928d\") " pod="openshift-console/console-757d775c7-shl7t" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.643664 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/c4ac199e-a5f1-4fc7-ad0f-ce907557928d-console-serving-cert\") pod \"console-757d775c7-shl7t\" (UID: \"c4ac199e-a5f1-4fc7-ad0f-ce907557928d\") " pod="openshift-console/console-757d775c7-shl7t" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.643710 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bl47r\" (UniqueName: \"kubernetes.io/projected/c4ac199e-a5f1-4fc7-ad0f-ce907557928d-kube-api-access-bl47r\") pod \"console-757d775c7-shl7t\" (UID: \"c4ac199e-a5f1-4fc7-ad0f-ce907557928d\") " pod="openshift-console/console-757d775c7-shl7t" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.643731 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/c4ac199e-a5f1-4fc7-ad0f-ce907557928d-oauth-serving-cert\") pod \"console-757d775c7-shl7t\" (UID: \"c4ac199e-a5f1-4fc7-ad0f-ce907557928d\") " pod="openshift-console/console-757d775c7-shl7t" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.643757 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/c4ac199e-a5f1-4fc7-ad0f-ce907557928d-console-config\") pod \"console-757d775c7-shl7t\" (UID: \"c4ac199e-a5f1-4fc7-ad0f-ce907557928d\") " pod="openshift-console/console-757d775c7-shl7t" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.643780 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/c4ac199e-a5f1-4fc7-ad0f-ce907557928d-service-ca\") pod \"console-757d775c7-shl7t\" (UID: \"c4ac199e-a5f1-4fc7-ad0f-ce907557928d\") " pod="openshift-console/console-757d775c7-shl7t" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.745501 4949 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/c4ac199e-a5f1-4fc7-ad0f-ce907557928d-console-serving-cert\") pod \"console-757d775c7-shl7t\" (UID: \"c4ac199e-a5f1-4fc7-ad0f-ce907557928d\") " pod="openshift-console/console-757d775c7-shl7t" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.745561 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bl47r\" (UniqueName: \"kubernetes.io/projected/c4ac199e-a5f1-4fc7-ad0f-ce907557928d-kube-api-access-bl47r\") pod \"console-757d775c7-shl7t\" (UID: \"c4ac199e-a5f1-4fc7-ad0f-ce907557928d\") " pod="openshift-console/console-757d775c7-shl7t" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.745592 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/c4ac199e-a5f1-4fc7-ad0f-ce907557928d-oauth-serving-cert\") pod \"console-757d775c7-shl7t\" (UID: \"c4ac199e-a5f1-4fc7-ad0f-ce907557928d\") " pod="openshift-console/console-757d775c7-shl7t" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.745629 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/c4ac199e-a5f1-4fc7-ad0f-ce907557928d-console-config\") pod \"console-757d775c7-shl7t\" (UID: \"c4ac199e-a5f1-4fc7-ad0f-ce907557928d\") " pod="openshift-console/console-757d775c7-shl7t" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.745658 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/c4ac199e-a5f1-4fc7-ad0f-ce907557928d-service-ca\") pod \"console-757d775c7-shl7t\" (UID: \"c4ac199e-a5f1-4fc7-ad0f-ce907557928d\") " pod="openshift-console/console-757d775c7-shl7t" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.745778 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/c4ac199e-a5f1-4fc7-ad0f-ce907557928d-console-oauth-config\") pod \"console-757d775c7-shl7t\" (UID: \"c4ac199e-a5f1-4fc7-ad0f-ce907557928d\") " pod="openshift-console/console-757d775c7-shl7t" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.745805 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c4ac199e-a5f1-4fc7-ad0f-ce907557928d-trusted-ca-bundle\") pod \"console-757d775c7-shl7t\" (UID: \"c4ac199e-a5f1-4fc7-ad0f-ce907557928d\") " pod="openshift-console/console-757d775c7-shl7t" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.748470 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/c4ac199e-a5f1-4fc7-ad0f-ce907557928d-oauth-serving-cert\") pod \"console-757d775c7-shl7t\" (UID: \"c4ac199e-a5f1-4fc7-ad0f-ce907557928d\") " pod="openshift-console/console-757d775c7-shl7t" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.751374 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c4ac199e-a5f1-4fc7-ad0f-ce907557928d-trusted-ca-bundle\") pod \"console-757d775c7-shl7t\" (UID: \"c4ac199e-a5f1-4fc7-ad0f-ce907557928d\") " pod="openshift-console/console-757d775c7-shl7t" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.752551 4949 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/c4ac199e-a5f1-4fc7-ad0f-ce907557928d-console-config\") pod \"console-757d775c7-shl7t\" (UID: \"c4ac199e-a5f1-4fc7-ad0f-ce907557928d\") " pod="openshift-console/console-757d775c7-shl7t" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.753800 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/c4ac199e-a5f1-4fc7-ad0f-ce907557928d-service-ca\") pod \"console-757d775c7-shl7t\" (UID: \"c4ac199e-a5f1-4fc7-ad0f-ce907557928d\") " pod="openshift-console/console-757d775c7-shl7t" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.754836 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/c4ac199e-a5f1-4fc7-ad0f-ce907557928d-console-serving-cert\") pod \"console-757d775c7-shl7t\" (UID: \"c4ac199e-a5f1-4fc7-ad0f-ce907557928d\") " pod="openshift-console/console-757d775c7-shl7t" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.763188 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/c4ac199e-a5f1-4fc7-ad0f-ce907557928d-console-oauth-config\") pod \"console-757d775c7-shl7t\" (UID: \"c4ac199e-a5f1-4fc7-ad0f-ce907557928d\") " pod="openshift-console/console-757d775c7-shl7t" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.777026 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bl47r\" (UniqueName: \"kubernetes.io/projected/c4ac199e-a5f1-4fc7-ad0f-ce907557928d-kube-api-access-bl47r\") pod \"console-757d775c7-shl7t\" (UID: \"c4ac199e-a5f1-4fc7-ad0f-ce907557928d\") " pod="openshift-console/console-757d775c7-shl7t" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.842858 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-757d775c7-shl7t" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.864239 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-866bcb46dc-qbzzp"] Feb 16 11:23:17 crc kubenswrapper[4949]: W0216 11:23:17.875333 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod50b488d3_6c66_4a7d_baaf_e7b7c30ca1f1.slice/crio-f230803737b5359920b256986f64809a2418b1ed2dfb279c97b751a24f0e08ce WatchSource:0}: Error finding container f230803737b5359920b256986f64809a2418b1ed2dfb279c97b751a24f0e08ce: Status 404 returned error can't find the container with id f230803737b5359920b256986f64809a2418b1ed2dfb279c97b751a24f0e08ce Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.949793 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/84824090-52b6-4a69-ad49-b441b666b14d-plugin-serving-cert\") pod \"nmstate-console-plugin-5c78fc5d65-6zbjh\" (UID: \"84824090-52b6-4a69-ad49-b441b666b14d\") " pod="openshift-nmstate/nmstate-console-plugin-5c78fc5d65-6zbjh" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.955644 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/84824090-52b6-4a69-ad49-b441b666b14d-plugin-serving-cert\") pod \"nmstate-console-plugin-5c78fc5d65-6zbjh\" (UID: \"84824090-52b6-4a69-ad49-b441b666b14d\") " pod="openshift-nmstate/nmstate-console-plugin-5c78fc5d65-6zbjh" Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.985046 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-xtxbl" event={"ID":"0e02bbc5-1e5b-41da-b16b-c42a001af050","Type":"ContainerStarted","Data":"fd674fc663465a318084f5385e56543e3bcfc4dd134e6eee558b40f42ce59618"} Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.986083 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-866bcb46dc-qbzzp" event={"ID":"50b488d3-6c66-4a7d-baaf-e7b7c30ca1f1","Type":"ContainerStarted","Data":"f230803737b5359920b256986f64809a2418b1ed2dfb279c97b751a24f0e08ce"} Feb 16 11:23:17 crc kubenswrapper[4949]: I0216 11:23:17.989515 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-58c85c668d-rnprv"] Feb 16 11:23:18 crc kubenswrapper[4949]: I0216 11:23:18.204493 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5c78fc5d65-6zbjh" Feb 16 11:23:18 crc kubenswrapper[4949]: I0216 11:23:18.349660 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-757d775c7-shl7t"] Feb 16 11:23:18 crc kubenswrapper[4949]: I0216 11:23:18.707880 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5c78fc5d65-6zbjh"] Feb 16 11:23:19 crc kubenswrapper[4949]: I0216 11:23:19.007989 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-757d775c7-shl7t" event={"ID":"c4ac199e-a5f1-4fc7-ad0f-ce907557928d","Type":"ContainerStarted","Data":"dc828a6055f2d9d222c312230d65386d41faf576c5f5ecfd9e85c06814e64733"} Feb 16 11:23:19 crc kubenswrapper[4949]: I0216 11:23:19.008561 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-757d775c7-shl7t" event={"ID":"c4ac199e-a5f1-4fc7-ad0f-ce907557928d","Type":"ContainerStarted","Data":"59bcb39fdc28f782ef304ed5482cfff2aedc5d962b7b92d6833ee76c87f8486c"} Feb 16 11:23:19 crc kubenswrapper[4949]: I0216 11:23:19.010879 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-58c85c668d-rnprv" event={"ID":"dab83125-a691-419c-b901-8476bc8881d4","Type":"ContainerStarted","Data":"e6ce170b5ccdb6c52f26b8d36ebf2d8d124b62326d74ee4c1d56ba4c42c5b3e8"} Feb 16 11:23:19 crc kubenswrapper[4949]: I0216 11:23:19.012902 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5c78fc5d65-6zbjh" event={"ID":"84824090-52b6-4a69-ad49-b441b666b14d","Type":"ContainerStarted","Data":"8a7a1cc4b8b274053882f8b31a6d0c95835abf6d61f5b5ae3ed6d2d23526278c"} Feb 16 11:23:19 crc kubenswrapper[4949]: I0216 11:23:19.035956 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-757d775c7-shl7t" podStartSLOduration=2.035932786 podStartE2EDuration="2.035932786s" podCreationTimestamp="2026-02-16 11:23:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:23:19.034737812 +0000 UTC m=+988.664071987" watchObservedRunningTime="2026-02-16 11:23:19.035932786 +0000 UTC m=+988.665266951" Feb 16 11:23:22 crc kubenswrapper[4949]: I0216 11:23:22.080485 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5c78fc5d65-6zbjh" event={"ID":"84824090-52b6-4a69-ad49-b441b666b14d","Type":"ContainerStarted","Data":"8ba5ec9d7ea98d3cc689fe0cafdc2b1754945d5de4c029b76c2a961c966c4be7"} Feb 16 11:23:22 crc kubenswrapper[4949]: I0216 11:23:22.083026 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-866bcb46dc-qbzzp" event={"ID":"50b488d3-6c66-4a7d-baaf-e7b7c30ca1f1","Type":"ContainerStarted","Data":"53e438dc83dc82d4ac2c8db7a4988d5358f30df64d763262e82e2f3b59e80c54"} Feb 16 11:23:22 crc kubenswrapper[4949]: I0216 11:23:22.083298 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-866bcb46dc-qbzzp" Feb 16 11:23:22 crc kubenswrapper[4949]: I0216 11:23:22.085101 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-xtxbl" event={"ID":"0e02bbc5-1e5b-41da-b16b-c42a001af050","Type":"ContainerStarted","Data":"2dd96480ddbd1c88bb90dd3d7e942bf6e3520006db495ec72cb13ff5c5b5ad56"} Feb 16 11:23:22 crc kubenswrapper[4949]: I0216 11:23:22.085217 4949 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-xtxbl" Feb 16 11:23:22 crc kubenswrapper[4949]: I0216 11:23:22.086936 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-58c85c668d-rnprv" event={"ID":"dab83125-a691-419c-b901-8476bc8881d4","Type":"ContainerStarted","Data":"d92964607b573d2a614f243eeed9a81dffa9445c66a4fc208805c710e12858e8"} Feb 16 11:23:22 crc kubenswrapper[4949]: I0216 11:23:22.102104 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-5c78fc5d65-6zbjh" podStartSLOduration=2.5492338820000002 podStartE2EDuration="5.102072063s" podCreationTimestamp="2026-02-16 11:23:17 +0000 UTC" firstStartedPulling="2026-02-16 11:23:18.734601605 +0000 UTC m=+988.363935770" lastFinishedPulling="2026-02-16 11:23:21.287439786 +0000 UTC m=+990.916773951" observedRunningTime="2026-02-16 11:23:22.098641415 +0000 UTC m=+991.727975580" watchObservedRunningTime="2026-02-16 11:23:22.102072063 +0000 UTC m=+991.731406218" Feb 16 11:23:22 crc kubenswrapper[4949]: I0216 11:23:22.120750 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-xtxbl" podStartSLOduration=1.371434651 podStartE2EDuration="5.120715882s" podCreationTimestamp="2026-02-16 11:23:17 +0000 UTC" firstStartedPulling="2026-02-16 11:23:17.549158707 +0000 UTC m=+987.178492872" lastFinishedPulling="2026-02-16 11:23:21.298439938 +0000 UTC m=+990.927774103" observedRunningTime="2026-02-16 11:23:22.119709503 +0000 UTC m=+991.749043678" watchObservedRunningTime="2026-02-16 11:23:22.120715882 +0000 UTC m=+991.750050047" Feb 16 11:23:22 crc kubenswrapper[4949]: I0216 11:23:22.144577 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-866bcb46dc-qbzzp" podStartSLOduration=1.73902101 podStartE2EDuration="5.144553468s" podCreationTimestamp="2026-02-16 11:23:17 +0000 UTC" firstStartedPulling="2026-02-16 11:23:17.882579787 +0000 UTC m=+987.511913952" lastFinishedPulling="2026-02-16 11:23:21.288112245 +0000 UTC m=+990.917446410" observedRunningTime="2026-02-16 11:23:22.143056196 +0000 UTC m=+991.772390371" watchObservedRunningTime="2026-02-16 11:23:22.144553468 +0000 UTC m=+991.773887633" Feb 16 11:23:25 crc kubenswrapper[4949]: I0216 11:23:25.113095 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-58c85c668d-rnprv" event={"ID":"dab83125-a691-419c-b901-8476bc8881d4","Type":"ContainerStarted","Data":"baf63ee731bd518b76e21efb7f3d8219513c8aa823bc165b7bce9a100dcfee87"} Feb 16 11:23:25 crc kubenswrapper[4949]: I0216 11:23:25.170729 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-58c85c668d-rnprv" podStartSLOduration=1.806877465 podStartE2EDuration="8.170704251s" podCreationTimestamp="2026-02-16 11:23:17 +0000 UTC" firstStartedPulling="2026-02-16 11:23:18.001549073 +0000 UTC m=+987.630883238" lastFinishedPulling="2026-02-16 11:23:24.365375839 +0000 UTC m=+993.994710024" observedRunningTime="2026-02-16 11:23:25.136554522 +0000 UTC m=+994.765888707" watchObservedRunningTime="2026-02-16 11:23:25.170704251 +0000 UTC m=+994.800038436" Feb 16 11:23:27 crc kubenswrapper[4949]: I0216 11:23:27.505751 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-xtxbl" Feb 16 11:23:27 crc kubenswrapper[4949]: I0216 11:23:27.843925 4949 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-757d775c7-shl7t" Feb 16 11:23:27 crc kubenswrapper[4949]: I0216 11:23:27.844019 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-757d775c7-shl7t" Feb 16 11:23:27 crc kubenswrapper[4949]: I0216 11:23:27.851503 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-757d775c7-shl7t" Feb 16 11:23:28 crc kubenswrapper[4949]: I0216 11:23:28.137864 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-757d775c7-shl7t" Feb 16 11:23:28 crc kubenswrapper[4949]: I0216 11:23:28.211512 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-786f9df47f-xh4rf"] Feb 16 11:23:34 crc kubenswrapper[4949]: I0216 11:23:34.550900 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 16 11:23:34 crc kubenswrapper[4949]: I0216 11:23:34.551495 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 16 11:23:37 crc kubenswrapper[4949]: I0216 11:23:37.429276 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-866bcb46dc-qbzzp" Feb 16 11:23:53 crc kubenswrapper[4949]: I0216 11:23:53.264788 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-786f9df47f-xh4rf" podUID="01deed3a-f137-429f-b5fd-672a361aa014" containerName="console" containerID="cri-o://dc4b7a4cf3e307f9fc378b5898f19b7643650d92659511e1c725565492a93948" gracePeriod=15 Feb 16 11:23:53 crc kubenswrapper[4949]: I0216 11:23:53.434714 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-786f9df47f-xh4rf_01deed3a-f137-429f-b5fd-672a361aa014/console/0.log" Feb 16 11:23:53 crc kubenswrapper[4949]: I0216 11:23:53.435227 4949 generic.go:334] "Generic (PLEG): container finished" podID="01deed3a-f137-429f-b5fd-672a361aa014" containerID="dc4b7a4cf3e307f9fc378b5898f19b7643650d92659511e1c725565492a93948" exitCode=2 Feb 16 11:23:53 crc kubenswrapper[4949]: I0216 11:23:53.435302 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-786f9df47f-xh4rf" event={"ID":"01deed3a-f137-429f-b5fd-672a361aa014","Type":"ContainerDied","Data":"dc4b7a4cf3e307f9fc378b5898f19b7643650d92659511e1c725565492a93948"} Feb 16 11:23:53 crc kubenswrapper[4949]: I0216 11:23:53.782158 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-786f9df47f-xh4rf_01deed3a-f137-429f-b5fd-672a361aa014/console/0.log" Feb 16 11:23:53 crc kubenswrapper[4949]: I0216 11:23:53.782593 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-786f9df47f-xh4rf" Feb 16 11:23:53 crc kubenswrapper[4949]: I0216 11:23:53.831814 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/01deed3a-f137-429f-b5fd-672a361aa014-trusted-ca-bundle\") pod \"01deed3a-f137-429f-b5fd-672a361aa014\" (UID: \"01deed3a-f137-429f-b5fd-672a361aa014\") " Feb 16 11:23:53 crc kubenswrapper[4949]: I0216 11:23:53.831916 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/01deed3a-f137-429f-b5fd-672a361aa014-console-config\") pod \"01deed3a-f137-429f-b5fd-672a361aa014\" (UID: \"01deed3a-f137-429f-b5fd-672a361aa014\") " Feb 16 11:23:53 crc kubenswrapper[4949]: I0216 11:23:53.831956 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/01deed3a-f137-429f-b5fd-672a361aa014-oauth-serving-cert\") pod \"01deed3a-f137-429f-b5fd-672a361aa014\" (UID: \"01deed3a-f137-429f-b5fd-672a361aa014\") " Feb 16 11:23:53 crc kubenswrapper[4949]: I0216 11:23:53.832058 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2rbmc\" (UniqueName: \"kubernetes.io/projected/01deed3a-f137-429f-b5fd-672a361aa014-kube-api-access-2rbmc\") pod \"01deed3a-f137-429f-b5fd-672a361aa014\" (UID: \"01deed3a-f137-429f-b5fd-672a361aa014\") " Feb 16 11:23:53 crc kubenswrapper[4949]: I0216 11:23:53.832134 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/01deed3a-f137-429f-b5fd-672a361aa014-service-ca\") pod \"01deed3a-f137-429f-b5fd-672a361aa014\" (UID: \"01deed3a-f137-429f-b5fd-672a361aa014\") " Feb 16 11:23:53 crc kubenswrapper[4949]: I0216 11:23:53.832187 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/01deed3a-f137-429f-b5fd-672a361aa014-console-serving-cert\") pod \"01deed3a-f137-429f-b5fd-672a361aa014\" (UID: \"01deed3a-f137-429f-b5fd-672a361aa014\") " Feb 16 11:23:53 crc kubenswrapper[4949]: I0216 11:23:53.832246 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/01deed3a-f137-429f-b5fd-672a361aa014-console-oauth-config\") pod \"01deed3a-f137-429f-b5fd-672a361aa014\" (UID: \"01deed3a-f137-429f-b5fd-672a361aa014\") " Feb 16 11:23:53 crc kubenswrapper[4949]: I0216 11:23:53.835273 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01deed3a-f137-429f-b5fd-672a361aa014-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "01deed3a-f137-429f-b5fd-672a361aa014" (UID: "01deed3a-f137-429f-b5fd-672a361aa014"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:23:53 crc kubenswrapper[4949]: I0216 11:23:53.835981 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01deed3a-f137-429f-b5fd-672a361aa014-service-ca" (OuterVolumeSpecName: "service-ca") pod "01deed3a-f137-429f-b5fd-672a361aa014" (UID: "01deed3a-f137-429f-b5fd-672a361aa014"). InnerVolumeSpecName "service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:23:53 crc kubenswrapper[4949]: I0216 11:23:53.836335 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01deed3a-f137-429f-b5fd-672a361aa014-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "01deed3a-f137-429f-b5fd-672a361aa014" (UID: "01deed3a-f137-429f-b5fd-672a361aa014"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:23:53 crc kubenswrapper[4949]: I0216 11:23:53.841878 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01deed3a-f137-429f-b5fd-672a361aa014-console-config" (OuterVolumeSpecName: "console-config") pod "01deed3a-f137-429f-b5fd-672a361aa014" (UID: "01deed3a-f137-429f-b5fd-672a361aa014"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:23:53 crc kubenswrapper[4949]: I0216 11:23:53.850146 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01deed3a-f137-429f-b5fd-672a361aa014-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "01deed3a-f137-429f-b5fd-672a361aa014" (UID: "01deed3a-f137-429f-b5fd-672a361aa014"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:23:53 crc kubenswrapper[4949]: I0216 11:23:53.860604 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01deed3a-f137-429f-b5fd-672a361aa014-kube-api-access-2rbmc" (OuterVolumeSpecName: "kube-api-access-2rbmc") pod "01deed3a-f137-429f-b5fd-672a361aa014" (UID: "01deed3a-f137-429f-b5fd-672a361aa014"). InnerVolumeSpecName "kube-api-access-2rbmc". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:23:53 crc kubenswrapper[4949]: I0216 11:23:53.885344 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01deed3a-f137-429f-b5fd-672a361aa014-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "01deed3a-f137-429f-b5fd-672a361aa014" (UID: "01deed3a-f137-429f-b5fd-672a361aa014"). InnerVolumeSpecName "console-oauth-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:23:53 crc kubenswrapper[4949]: I0216 11:23:53.941710 4949 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/01deed3a-f137-429f-b5fd-672a361aa014-console-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:23:53 crc kubenswrapper[4949]: I0216 11:23:53.941763 4949 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/01deed3a-f137-429f-b5fd-672a361aa014-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 16 11:23:53 crc kubenswrapper[4949]: I0216 11:23:53.941775 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2rbmc\" (UniqueName: \"kubernetes.io/projected/01deed3a-f137-429f-b5fd-672a361aa014-kube-api-access-2rbmc\") on node \"crc\" DevicePath \"\"" Feb 16 11:23:53 crc kubenswrapper[4949]: I0216 11:23:53.941788 4949 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/01deed3a-f137-429f-b5fd-672a361aa014-service-ca\") on node \"crc\" DevicePath \"\"" Feb 16 11:23:53 crc kubenswrapper[4949]: I0216 11:23:53.941800 4949 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/01deed3a-f137-429f-b5fd-672a361aa014-console-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 16 11:23:53 crc kubenswrapper[4949]: I0216 11:23:53.941811 4949 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/01deed3a-f137-429f-b5fd-672a361aa014-console-oauth-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:23:53 crc kubenswrapper[4949]: I0216 11:23:53.941820 4949 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/01deed3a-f137-429f-b5fd-672a361aa014-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:23:54 crc kubenswrapper[4949]: I0216 11:23:54.456950 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-786f9df47f-xh4rf_01deed3a-f137-429f-b5fd-672a361aa014/console/0.log" Feb 16 11:23:54 crc kubenswrapper[4949]: I0216 11:23:54.457456 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-786f9df47f-xh4rf" event={"ID":"01deed3a-f137-429f-b5fd-672a361aa014","Type":"ContainerDied","Data":"e9a140c4b504f7a5a942092929dfd78c2dad8c83d9045846e46f28a182feb8bd"} Feb 16 11:23:54 crc kubenswrapper[4949]: I0216 11:23:54.457520 4949 scope.go:117] "RemoveContainer" containerID="dc4b7a4cf3e307f9fc378b5898f19b7643650d92659511e1c725565492a93948" Feb 16 11:23:54 crc kubenswrapper[4949]: I0216 11:23:54.457569 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-786f9df47f-xh4rf" Feb 16 11:23:54 crc kubenswrapper[4949]: I0216 11:23:54.502426 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-786f9df47f-xh4rf"] Feb 16 11:23:54 crc kubenswrapper[4949]: I0216 11:23:54.509842 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-786f9df47f-xh4rf"] Feb 16 11:23:55 crc kubenswrapper[4949]: I0216 11:23:55.245131 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01deed3a-f137-429f-b5fd-672a361aa014" path="/var/lib/kubelet/pods/01deed3a-f137-429f-b5fd-672a361aa014/volumes" Feb 16 11:23:59 crc kubenswrapper[4949]: I0216 11:23:59.208398 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213b862n"] Feb 16 11:23:59 crc kubenswrapper[4949]: E0216 11:23:59.210560 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="01deed3a-f137-429f-b5fd-672a361aa014" containerName="console" Feb 16 11:23:59 crc kubenswrapper[4949]: I0216 11:23:59.210614 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="01deed3a-f137-429f-b5fd-672a361aa014" containerName="console" Feb 16 11:23:59 crc kubenswrapper[4949]: I0216 11:23:59.211050 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="01deed3a-f137-429f-b5fd-672a361aa014" containerName="console" Feb 16 11:23:59 crc kubenswrapper[4949]: I0216 11:23:59.217588 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213b862n" Feb 16 11:23:59 crc kubenswrapper[4949]: I0216 11:23:59.223964 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Feb 16 11:23:59 crc kubenswrapper[4949]: I0216 11:23:59.227537 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213b862n"] Feb 16 11:23:59 crc kubenswrapper[4949]: I0216 11:23:59.249799 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/81192a09-60ad-4403-85e3-e4994c0f4bd3-bundle\") pod \"a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213b862n\" (UID: \"81192a09-60ad-4403-85e3-e4994c0f4bd3\") " pod="openshift-marketplace/a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213b862n" Feb 16 11:23:59 crc kubenswrapper[4949]: I0216 11:23:59.249874 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jxqgw\" (UniqueName: \"kubernetes.io/projected/81192a09-60ad-4403-85e3-e4994c0f4bd3-kube-api-access-jxqgw\") pod \"a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213b862n\" (UID: \"81192a09-60ad-4403-85e3-e4994c0f4bd3\") " pod="openshift-marketplace/a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213b862n" Feb 16 11:23:59 crc kubenswrapper[4949]: I0216 11:23:59.250063 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/81192a09-60ad-4403-85e3-e4994c0f4bd3-util\") pod \"a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213b862n\" (UID: \"81192a09-60ad-4403-85e3-e4994c0f4bd3\") " pod="openshift-marketplace/a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213b862n" Feb 16 11:23:59 crc kubenswrapper[4949]: 
I0216 11:23:59.352025 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/81192a09-60ad-4403-85e3-e4994c0f4bd3-util\") pod \"a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213b862n\" (UID: \"81192a09-60ad-4403-85e3-e4994c0f4bd3\") " pod="openshift-marketplace/a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213b862n" Feb 16 11:23:59 crc kubenswrapper[4949]: I0216 11:23:59.352155 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/81192a09-60ad-4403-85e3-e4994c0f4bd3-bundle\") pod \"a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213b862n\" (UID: \"81192a09-60ad-4403-85e3-e4994c0f4bd3\") " pod="openshift-marketplace/a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213b862n" Feb 16 11:23:59 crc kubenswrapper[4949]: I0216 11:23:59.352211 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jxqgw\" (UniqueName: \"kubernetes.io/projected/81192a09-60ad-4403-85e3-e4994c0f4bd3-kube-api-access-jxqgw\") pod \"a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213b862n\" (UID: \"81192a09-60ad-4403-85e3-e4994c0f4bd3\") " pod="openshift-marketplace/a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213b862n" Feb 16 11:23:59 crc kubenswrapper[4949]: I0216 11:23:59.353685 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/81192a09-60ad-4403-85e3-e4994c0f4bd3-util\") pod \"a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213b862n\" (UID: \"81192a09-60ad-4403-85e3-e4994c0f4bd3\") " pod="openshift-marketplace/a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213b862n" Feb 16 11:23:59 crc kubenswrapper[4949]: I0216 11:23:59.353775 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/81192a09-60ad-4403-85e3-e4994c0f4bd3-bundle\") pod \"a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213b862n\" (UID: \"81192a09-60ad-4403-85e3-e4994c0f4bd3\") " pod="openshift-marketplace/a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213b862n" Feb 16 11:23:59 crc kubenswrapper[4949]: I0216 11:23:59.377441 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jxqgw\" (UniqueName: \"kubernetes.io/projected/81192a09-60ad-4403-85e3-e4994c0f4bd3-kube-api-access-jxqgw\") pod \"a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213b862n\" (UID: \"81192a09-60ad-4403-85e3-e4994c0f4bd3\") " pod="openshift-marketplace/a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213b862n" Feb 16 11:23:59 crc kubenswrapper[4949]: I0216 11:23:59.593406 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213b862n" Feb 16 11:24:00 crc kubenswrapper[4949]: I0216 11:24:00.018194 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213b862n"] Feb 16 11:24:00 crc kubenswrapper[4949]: I0216 11:24:00.509799 4949 generic.go:334] "Generic (PLEG): container finished" podID="81192a09-60ad-4403-85e3-e4994c0f4bd3" containerID="f53f74d6d0b7824f10351115059e5371a045e36f89fa860fd4a5967a0689f48d" exitCode=0 Feb 16 11:24:00 crc kubenswrapper[4949]: I0216 11:24:00.509863 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213b862n" event={"ID":"81192a09-60ad-4403-85e3-e4994c0f4bd3","Type":"ContainerDied","Data":"f53f74d6d0b7824f10351115059e5371a045e36f89fa860fd4a5967a0689f48d"} Feb 16 11:24:00 crc kubenswrapper[4949]: I0216 11:24:00.509902 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213b862n" event={"ID":"81192a09-60ad-4403-85e3-e4994c0f4bd3","Type":"ContainerStarted","Data":"14027630808c686a1ec33f98cc2be600c97425d61599040aa8a224e5ee8d6a3c"} Feb 16 11:24:00 crc kubenswrapper[4949]: I0216 11:24:00.512015 4949 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 16 11:24:02 crc kubenswrapper[4949]: I0216 11:24:02.551493 4949 generic.go:334] "Generic (PLEG): container finished" podID="81192a09-60ad-4403-85e3-e4994c0f4bd3" containerID="4dd1bc20b57a9953bcce194de861e522a0747f483f1cf56b3f8a837177c1f9f0" exitCode=0 Feb 16 11:24:02 crc kubenswrapper[4949]: I0216 11:24:02.552672 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213b862n" event={"ID":"81192a09-60ad-4403-85e3-e4994c0f4bd3","Type":"ContainerDied","Data":"4dd1bc20b57a9953bcce194de861e522a0747f483f1cf56b3f8a837177c1f9f0"} Feb 16 11:24:03 crc kubenswrapper[4949]: I0216 11:24:03.565212 4949 generic.go:334] "Generic (PLEG): container finished" podID="81192a09-60ad-4403-85e3-e4994c0f4bd3" containerID="f05194454b5f675fa0326da150adeef60bb4076e9efe5e1efbac7a2e56eca294" exitCode=0 Feb 16 11:24:03 crc kubenswrapper[4949]: I0216 11:24:03.565308 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213b862n" event={"ID":"81192a09-60ad-4403-85e3-e4994c0f4bd3","Type":"ContainerDied","Data":"f05194454b5f675fa0326da150adeef60bb4076e9efe5e1efbac7a2e56eca294"} Feb 16 11:24:04 crc kubenswrapper[4949]: I0216 11:24:04.550407 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 16 11:24:04 crc kubenswrapper[4949]: I0216 11:24:04.550976 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 16 11:24:04 crc kubenswrapper[4949]: I0216 11:24:04.551044 4949 kubelet.go:2542] "SyncLoop (probe)" 
probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-26lss" Feb 16 11:24:04 crc kubenswrapper[4949]: I0216 11:24:04.552200 4949 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3be1216f0de04908b66655ea21e2d3a0e3a372ff9aac95cc621972831b9f6c40"} pod="openshift-machine-config-operator/machine-config-daemon-26lss" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 16 11:24:04 crc kubenswrapper[4949]: I0216 11:24:04.552282 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" containerID="cri-o://3be1216f0de04908b66655ea21e2d3a0e3a372ff9aac95cc621972831b9f6c40" gracePeriod=600 Feb 16 11:24:05 crc kubenswrapper[4949]: I0216 11:24:05.011705 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213b862n" Feb 16 11:24:05 crc kubenswrapper[4949]: I0216 11:24:05.068816 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/81192a09-60ad-4403-85e3-e4994c0f4bd3-bundle\") pod \"81192a09-60ad-4403-85e3-e4994c0f4bd3\" (UID: \"81192a09-60ad-4403-85e3-e4994c0f4bd3\") " Feb 16 11:24:05 crc kubenswrapper[4949]: I0216 11:24:05.068982 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/81192a09-60ad-4403-85e3-e4994c0f4bd3-util\") pod \"81192a09-60ad-4403-85e3-e4994c0f4bd3\" (UID: \"81192a09-60ad-4403-85e3-e4994c0f4bd3\") " Feb 16 11:24:05 crc kubenswrapper[4949]: I0216 11:24:05.069049 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jxqgw\" (UniqueName: \"kubernetes.io/projected/81192a09-60ad-4403-85e3-e4994c0f4bd3-kube-api-access-jxqgw\") pod \"81192a09-60ad-4403-85e3-e4994c0f4bd3\" (UID: \"81192a09-60ad-4403-85e3-e4994c0f4bd3\") " Feb 16 11:24:05 crc kubenswrapper[4949]: I0216 11:24:05.070025 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/81192a09-60ad-4403-85e3-e4994c0f4bd3-bundle" (OuterVolumeSpecName: "bundle") pod "81192a09-60ad-4403-85e3-e4994c0f4bd3" (UID: "81192a09-60ad-4403-85e3-e4994c0f4bd3"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:24:05 crc kubenswrapper[4949]: I0216 11:24:05.085065 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/81192a09-60ad-4403-85e3-e4994c0f4bd3-util" (OuterVolumeSpecName: "util") pod "81192a09-60ad-4403-85e3-e4994c0f4bd3" (UID: "81192a09-60ad-4403-85e3-e4994c0f4bd3"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:24:05 crc kubenswrapper[4949]: I0216 11:24:05.090029 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/81192a09-60ad-4403-85e3-e4994c0f4bd3-kube-api-access-jxqgw" (OuterVolumeSpecName: "kube-api-access-jxqgw") pod "81192a09-60ad-4403-85e3-e4994c0f4bd3" (UID: "81192a09-60ad-4403-85e3-e4994c0f4bd3"). InnerVolumeSpecName "kube-api-access-jxqgw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:24:05 crc kubenswrapper[4949]: I0216 11:24:05.171224 4949 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/81192a09-60ad-4403-85e3-e4994c0f4bd3-util\") on node \"crc\" DevicePath \"\"" Feb 16 11:24:05 crc kubenswrapper[4949]: I0216 11:24:05.171265 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jxqgw\" (UniqueName: \"kubernetes.io/projected/81192a09-60ad-4403-85e3-e4994c0f4bd3-kube-api-access-jxqgw\") on node \"crc\" DevicePath \"\"" Feb 16 11:24:05 crc kubenswrapper[4949]: I0216 11:24:05.171276 4949 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/81192a09-60ad-4403-85e3-e4994c0f4bd3-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:24:05 crc kubenswrapper[4949]: I0216 11:24:05.598341 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213b862n" event={"ID":"81192a09-60ad-4403-85e3-e4994c0f4bd3","Type":"ContainerDied","Data":"14027630808c686a1ec33f98cc2be600c97425d61599040aa8a224e5ee8d6a3c"} Feb 16 11:24:05 crc kubenswrapper[4949]: I0216 11:24:05.598856 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="14027630808c686a1ec33f98cc2be600c97425d61599040aa8a224e5ee8d6a3c" Feb 16 11:24:05 crc kubenswrapper[4949]: I0216 11:24:05.598460 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213b862n" Feb 16 11:24:05 crc kubenswrapper[4949]: I0216 11:24:05.605613 4949 generic.go:334] "Generic (PLEG): container finished" podID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerID="3be1216f0de04908b66655ea21e2d3a0e3a372ff9aac95cc621972831b9f6c40" exitCode=0 Feb 16 11:24:05 crc kubenswrapper[4949]: I0216 11:24:05.605691 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" event={"ID":"39ca5ab7-457c-4404-a3eb-f6acce74843b","Type":"ContainerDied","Data":"3be1216f0de04908b66655ea21e2d3a0e3a372ff9aac95cc621972831b9f6c40"} Feb 16 11:24:05 crc kubenswrapper[4949]: I0216 11:24:05.605738 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" event={"ID":"39ca5ab7-457c-4404-a3eb-f6acce74843b","Type":"ContainerStarted","Data":"81d321ae3cf8ae54ff452597f9576b0c0a3cd11dc176e9a1b231f17a43bc97b9"} Feb 16 11:24:05 crc kubenswrapper[4949]: I0216 11:24:05.605767 4949 scope.go:117] "RemoveContainer" containerID="060f940f61e708f7f4d603618a347c5e6eb0b808f7ca4a5027e6133a9e486da3" Feb 16 11:24:14 crc kubenswrapper[4949]: I0216 11:24:14.496122 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-858d98cccb-ckn97"] Feb 16 11:24:14 crc kubenswrapper[4949]: E0216 11:24:14.497079 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81192a09-60ad-4403-85e3-e4994c0f4bd3" containerName="util" Feb 16 11:24:14 crc kubenswrapper[4949]: I0216 11:24:14.497093 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="81192a09-60ad-4403-85e3-e4994c0f4bd3" containerName="util" Feb 16 11:24:14 crc kubenswrapper[4949]: E0216 11:24:14.497106 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81192a09-60ad-4403-85e3-e4994c0f4bd3" containerName="extract" Feb 16 11:24:14 crc 
kubenswrapper[4949]: I0216 11:24:14.497112 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="81192a09-60ad-4403-85e3-e4994c0f4bd3" containerName="extract" Feb 16 11:24:14 crc kubenswrapper[4949]: E0216 11:24:14.497124 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81192a09-60ad-4403-85e3-e4994c0f4bd3" containerName="pull" Feb 16 11:24:14 crc kubenswrapper[4949]: I0216 11:24:14.497131 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="81192a09-60ad-4403-85e3-e4994c0f4bd3" containerName="pull" Feb 16 11:24:14 crc kubenswrapper[4949]: I0216 11:24:14.497299 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="81192a09-60ad-4403-85e3-e4994c0f4bd3" containerName="extract" Feb 16 11:24:14 crc kubenswrapper[4949]: I0216 11:24:14.497962 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-858d98cccb-ckn97" Feb 16 11:24:14 crc kubenswrapper[4949]: I0216 11:24:14.501392 4949 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Feb 16 11:24:14 crc kubenswrapper[4949]: I0216 11:24:14.501592 4949 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-mbzff" Feb 16 11:24:14 crc kubenswrapper[4949]: I0216 11:24:14.501798 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Feb 16 11:24:14 crc kubenswrapper[4949]: I0216 11:24:14.501932 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Feb 16 11:24:14 crc kubenswrapper[4949]: I0216 11:24:14.502875 4949 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Feb 16 11:24:14 crc kubenswrapper[4949]: I0216 11:24:14.568541 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-858d98cccb-ckn97"] Feb 16 11:24:14 crc kubenswrapper[4949]: I0216 11:24:14.576840 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c8bcb521-dc2b-4646-898a-f488d0626ebb-webhook-cert\") pod \"metallb-operator-controller-manager-858d98cccb-ckn97\" (UID: \"c8bcb521-dc2b-4646-898a-f488d0626ebb\") " pod="metallb-system/metallb-operator-controller-manager-858d98cccb-ckn97" Feb 16 11:24:14 crc kubenswrapper[4949]: I0216 11:24:14.576992 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r4mvj\" (UniqueName: \"kubernetes.io/projected/c8bcb521-dc2b-4646-898a-f488d0626ebb-kube-api-access-r4mvj\") pod \"metallb-operator-controller-manager-858d98cccb-ckn97\" (UID: \"c8bcb521-dc2b-4646-898a-f488d0626ebb\") " pod="metallb-system/metallb-operator-controller-manager-858d98cccb-ckn97" Feb 16 11:24:14 crc kubenswrapper[4949]: I0216 11:24:14.577024 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c8bcb521-dc2b-4646-898a-f488d0626ebb-apiservice-cert\") pod \"metallb-operator-controller-manager-858d98cccb-ckn97\" (UID: \"c8bcb521-dc2b-4646-898a-f488d0626ebb\") " pod="metallb-system/metallb-operator-controller-manager-858d98cccb-ckn97" Feb 16 11:24:14 crc kubenswrapper[4949]: I0216 11:24:14.679639 4949 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c8bcb521-dc2b-4646-898a-f488d0626ebb-webhook-cert\") pod \"metallb-operator-controller-manager-858d98cccb-ckn97\" (UID: \"c8bcb521-dc2b-4646-898a-f488d0626ebb\") " pod="metallb-system/metallb-operator-controller-manager-858d98cccb-ckn97" Feb 16 11:24:14 crc kubenswrapper[4949]: I0216 11:24:14.679754 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r4mvj\" (UniqueName: \"kubernetes.io/projected/c8bcb521-dc2b-4646-898a-f488d0626ebb-kube-api-access-r4mvj\") pod \"metallb-operator-controller-manager-858d98cccb-ckn97\" (UID: \"c8bcb521-dc2b-4646-898a-f488d0626ebb\") " pod="metallb-system/metallb-operator-controller-manager-858d98cccb-ckn97" Feb 16 11:24:14 crc kubenswrapper[4949]: I0216 11:24:14.679788 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c8bcb521-dc2b-4646-898a-f488d0626ebb-apiservice-cert\") pod \"metallb-operator-controller-manager-858d98cccb-ckn97\" (UID: \"c8bcb521-dc2b-4646-898a-f488d0626ebb\") " pod="metallb-system/metallb-operator-controller-manager-858d98cccb-ckn97" Feb 16 11:24:14 crc kubenswrapper[4949]: I0216 11:24:14.688332 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c8bcb521-dc2b-4646-898a-f488d0626ebb-webhook-cert\") pod \"metallb-operator-controller-manager-858d98cccb-ckn97\" (UID: \"c8bcb521-dc2b-4646-898a-f488d0626ebb\") " pod="metallb-system/metallb-operator-controller-manager-858d98cccb-ckn97" Feb 16 11:24:14 crc kubenswrapper[4949]: I0216 11:24:14.703038 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c8bcb521-dc2b-4646-898a-f488d0626ebb-apiservice-cert\") pod \"metallb-operator-controller-manager-858d98cccb-ckn97\" (UID: \"c8bcb521-dc2b-4646-898a-f488d0626ebb\") " pod="metallb-system/metallb-operator-controller-manager-858d98cccb-ckn97" Feb 16 11:24:14 crc kubenswrapper[4949]: I0216 11:24:14.704062 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r4mvj\" (UniqueName: \"kubernetes.io/projected/c8bcb521-dc2b-4646-898a-f488d0626ebb-kube-api-access-r4mvj\") pod \"metallb-operator-controller-manager-858d98cccb-ckn97\" (UID: \"c8bcb521-dc2b-4646-898a-f488d0626ebb\") " pod="metallb-system/metallb-operator-controller-manager-858d98cccb-ckn97" Feb 16 11:24:14 crc kubenswrapper[4949]: I0216 11:24:14.778030 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-7c75d8dfd-vlp9v"] Feb 16 11:24:14 crc kubenswrapper[4949]: I0216 11:24:14.779559 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-7c75d8dfd-vlp9v" Feb 16 11:24:14 crc kubenswrapper[4949]: I0216 11:24:14.783797 4949 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-qtqq2" Feb 16 11:24:14 crc kubenswrapper[4949]: I0216 11:24:14.784102 4949 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Feb 16 11:24:14 crc kubenswrapper[4949]: I0216 11:24:14.784122 4949 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Feb 16 11:24:14 crc kubenswrapper[4949]: I0216 11:24:14.802154 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-7c75d8dfd-vlp9v"] Feb 16 11:24:14 crc kubenswrapper[4949]: I0216 11:24:14.820041 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-858d98cccb-ckn97" Feb 16 11:24:14 crc kubenswrapper[4949]: I0216 11:24:14.882797 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4nmgh\" (UniqueName: \"kubernetes.io/projected/17fd589b-a008-4648-bf62-cf0bfceb4878-kube-api-access-4nmgh\") pod \"metallb-operator-webhook-server-7c75d8dfd-vlp9v\" (UID: \"17fd589b-a008-4648-bf62-cf0bfceb4878\") " pod="metallb-system/metallb-operator-webhook-server-7c75d8dfd-vlp9v" Feb 16 11:24:14 crc kubenswrapper[4949]: I0216 11:24:14.882867 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/17fd589b-a008-4648-bf62-cf0bfceb4878-webhook-cert\") pod \"metallb-operator-webhook-server-7c75d8dfd-vlp9v\" (UID: \"17fd589b-a008-4648-bf62-cf0bfceb4878\") " pod="metallb-system/metallb-operator-webhook-server-7c75d8dfd-vlp9v" Feb 16 11:24:14 crc kubenswrapper[4949]: I0216 11:24:14.882911 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/17fd589b-a008-4648-bf62-cf0bfceb4878-apiservice-cert\") pod \"metallb-operator-webhook-server-7c75d8dfd-vlp9v\" (UID: \"17fd589b-a008-4648-bf62-cf0bfceb4878\") " pod="metallb-system/metallb-operator-webhook-server-7c75d8dfd-vlp9v" Feb 16 11:24:14 crc kubenswrapper[4949]: I0216 11:24:14.985623 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4nmgh\" (UniqueName: \"kubernetes.io/projected/17fd589b-a008-4648-bf62-cf0bfceb4878-kube-api-access-4nmgh\") pod \"metallb-operator-webhook-server-7c75d8dfd-vlp9v\" (UID: \"17fd589b-a008-4648-bf62-cf0bfceb4878\") " pod="metallb-system/metallb-operator-webhook-server-7c75d8dfd-vlp9v" Feb 16 11:24:14 crc kubenswrapper[4949]: I0216 11:24:14.985692 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/17fd589b-a008-4648-bf62-cf0bfceb4878-webhook-cert\") pod \"metallb-operator-webhook-server-7c75d8dfd-vlp9v\" (UID: \"17fd589b-a008-4648-bf62-cf0bfceb4878\") " pod="metallb-system/metallb-operator-webhook-server-7c75d8dfd-vlp9v" Feb 16 11:24:14 crc kubenswrapper[4949]: I0216 11:24:14.985729 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/17fd589b-a008-4648-bf62-cf0bfceb4878-apiservice-cert\") pod 
\"metallb-operator-webhook-server-7c75d8dfd-vlp9v\" (UID: \"17fd589b-a008-4648-bf62-cf0bfceb4878\") " pod="metallb-system/metallb-operator-webhook-server-7c75d8dfd-vlp9v" Feb 16 11:24:14 crc kubenswrapper[4949]: I0216 11:24:14.991929 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/17fd589b-a008-4648-bf62-cf0bfceb4878-webhook-cert\") pod \"metallb-operator-webhook-server-7c75d8dfd-vlp9v\" (UID: \"17fd589b-a008-4648-bf62-cf0bfceb4878\") " pod="metallb-system/metallb-operator-webhook-server-7c75d8dfd-vlp9v" Feb 16 11:24:15 crc kubenswrapper[4949]: I0216 11:24:15.017853 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/17fd589b-a008-4648-bf62-cf0bfceb4878-apiservice-cert\") pod \"metallb-operator-webhook-server-7c75d8dfd-vlp9v\" (UID: \"17fd589b-a008-4648-bf62-cf0bfceb4878\") " pod="metallb-system/metallb-operator-webhook-server-7c75d8dfd-vlp9v" Feb 16 11:24:15 crc kubenswrapper[4949]: I0216 11:24:15.033805 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4nmgh\" (UniqueName: \"kubernetes.io/projected/17fd589b-a008-4648-bf62-cf0bfceb4878-kube-api-access-4nmgh\") pod \"metallb-operator-webhook-server-7c75d8dfd-vlp9v\" (UID: \"17fd589b-a008-4648-bf62-cf0bfceb4878\") " pod="metallb-system/metallb-operator-webhook-server-7c75d8dfd-vlp9v" Feb 16 11:24:15 crc kubenswrapper[4949]: I0216 11:24:15.100665 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-7c75d8dfd-vlp9v" Feb 16 11:24:15 crc kubenswrapper[4949]: I0216 11:24:15.413312 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-7c75d8dfd-vlp9v"] Feb 16 11:24:15 crc kubenswrapper[4949]: I0216 11:24:15.439515 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-858d98cccb-ckn97"] Feb 16 11:24:15 crc kubenswrapper[4949]: W0216 11:24:15.443122 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc8bcb521_dc2b_4646_898a_f488d0626ebb.slice/crio-35f3eedb73464e858f535185c0bc45cda9ba236a11b12f3ac17fa72377c61071 WatchSource:0}: Error finding container 35f3eedb73464e858f535185c0bc45cda9ba236a11b12f3ac17fa72377c61071: Status 404 returned error can't find the container with id 35f3eedb73464e858f535185c0bc45cda9ba236a11b12f3ac17fa72377c61071 Feb 16 11:24:15 crc kubenswrapper[4949]: I0216 11:24:15.725306 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-7c75d8dfd-vlp9v" event={"ID":"17fd589b-a008-4648-bf62-cf0bfceb4878","Type":"ContainerStarted","Data":"cc63a9015bfecab849d472dd5b0ac9cff1f16f47a758bd908179c5ecc2b60df5"} Feb 16 11:24:15 crc kubenswrapper[4949]: I0216 11:24:15.728720 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-858d98cccb-ckn97" event={"ID":"c8bcb521-dc2b-4646-898a-f488d0626ebb","Type":"ContainerStarted","Data":"35f3eedb73464e858f535185c0bc45cda9ba236a11b12f3ac17fa72377c61071"} Feb 16 11:24:21 crc kubenswrapper[4949]: I0216 11:24:21.797301 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-858d98cccb-ckn97" 
event={"ID":"c8bcb521-dc2b-4646-898a-f488d0626ebb","Type":"ContainerStarted","Data":"423ec39f33cf16fcd33382437489c08a121264a8e5a2c9bf84ccc1eebbd63cb2"} Feb 16 11:24:21 crc kubenswrapper[4949]: I0216 11:24:21.799162 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-7c75d8dfd-vlp9v" event={"ID":"17fd589b-a008-4648-bf62-cf0bfceb4878","Type":"ContainerStarted","Data":"be2d6d2b58f87e74815dd0521a925056a35916f27b06620fb68f80d4f429da74"} Feb 16 11:24:21 crc kubenswrapper[4949]: I0216 11:24:21.799729 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-7c75d8dfd-vlp9v" Feb 16 11:24:21 crc kubenswrapper[4949]: I0216 11:24:21.821718 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-858d98cccb-ckn97" podStartSLOduration=2.421153977 podStartE2EDuration="7.821698527s" podCreationTimestamp="2026-02-16 11:24:14 +0000 UTC" firstStartedPulling="2026-02-16 11:24:15.446877494 +0000 UTC m=+1045.076211659" lastFinishedPulling="2026-02-16 11:24:20.847422044 +0000 UTC m=+1050.476756209" observedRunningTime="2026-02-16 11:24:21.81968685 +0000 UTC m=+1051.449021015" watchObservedRunningTime="2026-02-16 11:24:21.821698527 +0000 UTC m=+1051.451032692" Feb 16 11:24:21 crc kubenswrapper[4949]: I0216 11:24:21.857722 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-7c75d8dfd-vlp9v" podStartSLOduration=2.427130937 podStartE2EDuration="7.857701961s" podCreationTimestamp="2026-02-16 11:24:14 +0000 UTC" firstStartedPulling="2026-02-16 11:24:15.421388029 +0000 UTC m=+1045.050722194" lastFinishedPulling="2026-02-16 11:24:20.851959053 +0000 UTC m=+1050.481293218" observedRunningTime="2026-02-16 11:24:21.854207412 +0000 UTC m=+1051.483541577" watchObservedRunningTime="2026-02-16 11:24:21.857701961 +0000 UTC m=+1051.487036126" Feb 16 11:24:22 crc kubenswrapper[4949]: I0216 11:24:22.807592 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-858d98cccb-ckn97" Feb 16 11:24:35 crc kubenswrapper[4949]: I0216 11:24:35.110560 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-7c75d8dfd-vlp9v" Feb 16 11:24:54 crc kubenswrapper[4949]: I0216 11:24:54.824877 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-858d98cccb-ckn97" Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.617702 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-78b44bf5bb-5tk44"] Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.619306 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-78b44bf5bb-5tk44" Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.625786 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4vw6j\" (UniqueName: \"kubernetes.io/projected/2fc23a3f-060b-4441-9ed0-b9a6749338be-kube-api-access-4vw6j\") pod \"frr-k8s-webhook-server-78b44bf5bb-5tk44\" (UID: \"2fc23a3f-060b-4441-9ed0-b9a6749338be\") " pod="metallb-system/frr-k8s-webhook-server-78b44bf5bb-5tk44" Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.625840 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/2fc23a3f-060b-4441-9ed0-b9a6749338be-cert\") pod \"frr-k8s-webhook-server-78b44bf5bb-5tk44\" (UID: \"2fc23a3f-060b-4441-9ed0-b9a6749338be\") " pod="metallb-system/frr-k8s-webhook-server-78b44bf5bb-5tk44" Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.629766 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-knrks"] Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.634221 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-knrks" Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.634645 4949 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-dvnr4" Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.635515 4949 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.637271 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.639752 4949 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.684598 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-78b44bf5bb-5tk44"] Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.727504 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4vw6j\" (UniqueName: \"kubernetes.io/projected/2fc23a3f-060b-4441-9ed0-b9a6749338be-kube-api-access-4vw6j\") pod \"frr-k8s-webhook-server-78b44bf5bb-5tk44\" (UID: \"2fc23a3f-060b-4441-9ed0-b9a6749338be\") " pod="metallb-system/frr-k8s-webhook-server-78b44bf5bb-5tk44" Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.727551 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/2fc23a3f-060b-4441-9ed0-b9a6749338be-cert\") pod \"frr-k8s-webhook-server-78b44bf5bb-5tk44\" (UID: \"2fc23a3f-060b-4441-9ed0-b9a6749338be\") " pod="metallb-system/frr-k8s-webhook-server-78b44bf5bb-5tk44" Feb 16 11:24:55 crc kubenswrapper[4949]: E0216 11:24:55.727787 4949 secret.go:188] Couldn't get secret metallb-system/frr-k8s-webhook-server-cert: secret "frr-k8s-webhook-server-cert" not found Feb 16 11:24:55 crc kubenswrapper[4949]: E0216 11:24:55.727852 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2fc23a3f-060b-4441-9ed0-b9a6749338be-cert podName:2fc23a3f-060b-4441-9ed0-b9a6749338be nodeName:}" failed. No retries permitted until 2026-02-16 11:24:56.22783161 +0000 UTC m=+1085.857165775 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/2fc23a3f-060b-4441-9ed0-b9a6749338be-cert") pod "frr-k8s-webhook-server-78b44bf5bb-5tk44" (UID: "2fc23a3f-060b-4441-9ed0-b9a6749338be") : secret "frr-k8s-webhook-server-cert" not found Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.732162 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-vfbbw"] Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.733835 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-vfbbw" Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.740430 4949 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.740469 4949 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-ghlx6" Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.740609 4949 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.740796 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.749472 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-69bbfbf88f-s6ht6"] Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.750995 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-69bbfbf88f-s6ht6" Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.754565 4949 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.766236 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-69bbfbf88f-s6ht6"] Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.767374 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4vw6j\" (UniqueName: \"kubernetes.io/projected/2fc23a3f-060b-4441-9ed0-b9a6749338be-kube-api-access-4vw6j\") pod \"frr-k8s-webhook-server-78b44bf5bb-5tk44\" (UID: \"2fc23a3f-060b-4441-9ed0-b9a6749338be\") " pod="metallb-system/frr-k8s-webhook-server-78b44bf5bb-5tk44" Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.829745 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e39d2618-f332-4236-9437-77af6dc23e3d-metrics-certs\") pod \"frr-k8s-knrks\" (UID: \"e39d2618-f332-4236-9437-77af6dc23e3d\") " pod="metallb-system/frr-k8s-knrks" Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.830254 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wfktw\" (UniqueName: \"kubernetes.io/projected/e39d2618-f332-4236-9437-77af6dc23e3d-kube-api-access-wfktw\") pod \"frr-k8s-knrks\" (UID: \"e39d2618-f332-4236-9437-77af6dc23e3d\") " pod="metallb-system/frr-k8s-knrks" Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.830289 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/e39d2618-f332-4236-9437-77af6dc23e3d-frr-sockets\") pod \"frr-k8s-knrks\" (UID: \"e39d2618-f332-4236-9437-77af6dc23e3d\") " pod="metallb-system/frr-k8s-knrks" Feb 16 
11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.830332 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/e39d2618-f332-4236-9437-77af6dc23e3d-metrics\") pod \"frr-k8s-knrks\" (UID: \"e39d2618-f332-4236-9437-77af6dc23e3d\") " pod="metallb-system/frr-k8s-knrks" Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.830441 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/e39d2618-f332-4236-9437-77af6dc23e3d-frr-conf\") pod \"frr-k8s-knrks\" (UID: \"e39d2618-f332-4236-9437-77af6dc23e3d\") " pod="metallb-system/frr-k8s-knrks" Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.830482 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/e39d2618-f332-4236-9437-77af6dc23e3d-reloader\") pod \"frr-k8s-knrks\" (UID: \"e39d2618-f332-4236-9437-77af6dc23e3d\") " pod="metallb-system/frr-k8s-knrks" Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.830510 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/e39d2618-f332-4236-9437-77af6dc23e3d-frr-startup\") pod \"frr-k8s-knrks\" (UID: \"e39d2618-f332-4236-9437-77af6dc23e3d\") " pod="metallb-system/frr-k8s-knrks" Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.932365 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e39d2618-f332-4236-9437-77af6dc23e3d-metrics-certs\") pod \"frr-k8s-knrks\" (UID: \"e39d2618-f332-4236-9437-77af6dc23e3d\") " pod="metallb-system/frr-k8s-knrks" Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.932482 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wfktw\" (UniqueName: \"kubernetes.io/projected/e39d2618-f332-4236-9437-77af6dc23e3d-kube-api-access-wfktw\") pod \"frr-k8s-knrks\" (UID: \"e39d2618-f332-4236-9437-77af6dc23e3d\") " pod="metallb-system/frr-k8s-knrks" Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.932513 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/e39d2618-f332-4236-9437-77af6dc23e3d-frr-sockets\") pod \"frr-k8s-knrks\" (UID: \"e39d2618-f332-4236-9437-77af6dc23e3d\") " pod="metallb-system/frr-k8s-knrks" Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.932556 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/e39d2618-f332-4236-9437-77af6dc23e3d-metrics\") pod \"frr-k8s-knrks\" (UID: \"e39d2618-f332-4236-9437-77af6dc23e3d\") " pod="metallb-system/frr-k8s-knrks" Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.932593 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zwsnk\" (UniqueName: \"kubernetes.io/projected/f007c1dd-b910-41d6-96d9-1642b8eec8c3-kube-api-access-zwsnk\") pod \"speaker-vfbbw\" (UID: \"f007c1dd-b910-41d6-96d9-1642b8eec8c3\") " pod="metallb-system/speaker-vfbbw" Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.932644 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: 
\"kubernetes.io/secret/deae7c82-4cc7-4bb3-a96b-fd537f41fe89-cert\") pod \"controller-69bbfbf88f-s6ht6\" (UID: \"deae7c82-4cc7-4bb3-a96b-fd537f41fe89\") " pod="metallb-system/controller-69bbfbf88f-s6ht6" Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.932729 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/f007c1dd-b910-41d6-96d9-1642b8eec8c3-metallb-excludel2\") pod \"speaker-vfbbw\" (UID: \"f007c1dd-b910-41d6-96d9-1642b8eec8c3\") " pod="metallb-system/speaker-vfbbw" Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.932814 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/e39d2618-f332-4236-9437-77af6dc23e3d-frr-conf\") pod \"frr-k8s-knrks\" (UID: \"e39d2618-f332-4236-9437-77af6dc23e3d\") " pod="metallb-system/frr-k8s-knrks" Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.932862 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g6jsv\" (UniqueName: \"kubernetes.io/projected/deae7c82-4cc7-4bb3-a96b-fd537f41fe89-kube-api-access-g6jsv\") pod \"controller-69bbfbf88f-s6ht6\" (UID: \"deae7c82-4cc7-4bb3-a96b-fd537f41fe89\") " pod="metallb-system/controller-69bbfbf88f-s6ht6" Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.932888 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/e39d2618-f332-4236-9437-77af6dc23e3d-reloader\") pod \"frr-k8s-knrks\" (UID: \"e39d2618-f332-4236-9437-77af6dc23e3d\") " pod="metallb-system/frr-k8s-knrks" Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.932968 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/f007c1dd-b910-41d6-96d9-1642b8eec8c3-memberlist\") pod \"speaker-vfbbw\" (UID: \"f007c1dd-b910-41d6-96d9-1642b8eec8c3\") " pod="metallb-system/speaker-vfbbw" Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.933012 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/e39d2618-f332-4236-9437-77af6dc23e3d-frr-startup\") pod \"frr-k8s-knrks\" (UID: \"e39d2618-f332-4236-9437-77af6dc23e3d\") " pod="metallb-system/frr-k8s-knrks" Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.933036 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/deae7c82-4cc7-4bb3-a96b-fd537f41fe89-metrics-certs\") pod \"controller-69bbfbf88f-s6ht6\" (UID: \"deae7c82-4cc7-4bb3-a96b-fd537f41fe89\") " pod="metallb-system/controller-69bbfbf88f-s6ht6" Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.933057 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f007c1dd-b910-41d6-96d9-1642b8eec8c3-metrics-certs\") pod \"speaker-vfbbw\" (UID: \"f007c1dd-b910-41d6-96d9-1642b8eec8c3\") " pod="metallb-system/speaker-vfbbw" Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.934477 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/e39d2618-f332-4236-9437-77af6dc23e3d-frr-sockets\") pod \"frr-k8s-knrks\" (UID: \"e39d2618-f332-4236-9437-77af6dc23e3d\") " 
pod="metallb-system/frr-k8s-knrks" Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.934857 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/e39d2618-f332-4236-9437-77af6dc23e3d-frr-conf\") pod \"frr-k8s-knrks\" (UID: \"e39d2618-f332-4236-9437-77af6dc23e3d\") " pod="metallb-system/frr-k8s-knrks" Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.934904 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/e39d2618-f332-4236-9437-77af6dc23e3d-metrics\") pod \"frr-k8s-knrks\" (UID: \"e39d2618-f332-4236-9437-77af6dc23e3d\") " pod="metallb-system/frr-k8s-knrks" Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.935159 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/e39d2618-f332-4236-9437-77af6dc23e3d-reloader\") pod \"frr-k8s-knrks\" (UID: \"e39d2618-f332-4236-9437-77af6dc23e3d\") " pod="metallb-system/frr-k8s-knrks" Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.935774 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/e39d2618-f332-4236-9437-77af6dc23e3d-frr-startup\") pod \"frr-k8s-knrks\" (UID: \"e39d2618-f332-4236-9437-77af6dc23e3d\") " pod="metallb-system/frr-k8s-knrks" Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.937365 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e39d2618-f332-4236-9437-77af6dc23e3d-metrics-certs\") pod \"frr-k8s-knrks\" (UID: \"e39d2618-f332-4236-9437-77af6dc23e3d\") " pod="metallb-system/frr-k8s-knrks" Feb 16 11:24:55 crc kubenswrapper[4949]: I0216 11:24:55.973165 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wfktw\" (UniqueName: \"kubernetes.io/projected/e39d2618-f332-4236-9437-77af6dc23e3d-kube-api-access-wfktw\") pod \"frr-k8s-knrks\" (UID: \"e39d2618-f332-4236-9437-77af6dc23e3d\") " pod="metallb-system/frr-k8s-knrks" Feb 16 11:24:56 crc kubenswrapper[4949]: I0216 11:24:56.035375 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/f007c1dd-b910-41d6-96d9-1642b8eec8c3-metallb-excludel2\") pod \"speaker-vfbbw\" (UID: \"f007c1dd-b910-41d6-96d9-1642b8eec8c3\") " pod="metallb-system/speaker-vfbbw" Feb 16 11:24:56 crc kubenswrapper[4949]: I0216 11:24:56.035485 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g6jsv\" (UniqueName: \"kubernetes.io/projected/deae7c82-4cc7-4bb3-a96b-fd537f41fe89-kube-api-access-g6jsv\") pod \"controller-69bbfbf88f-s6ht6\" (UID: \"deae7c82-4cc7-4bb3-a96b-fd537f41fe89\") " pod="metallb-system/controller-69bbfbf88f-s6ht6" Feb 16 11:24:56 crc kubenswrapper[4949]: I0216 11:24:56.035517 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/f007c1dd-b910-41d6-96d9-1642b8eec8c3-memberlist\") pod \"speaker-vfbbw\" (UID: \"f007c1dd-b910-41d6-96d9-1642b8eec8c3\") " pod="metallb-system/speaker-vfbbw" Feb 16 11:24:56 crc kubenswrapper[4949]: I0216 11:24:56.035546 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f007c1dd-b910-41d6-96d9-1642b8eec8c3-metrics-certs\") pod \"speaker-vfbbw\" (UID: 
\"f007c1dd-b910-41d6-96d9-1642b8eec8c3\") " pod="metallb-system/speaker-vfbbw" Feb 16 11:24:56 crc kubenswrapper[4949]: I0216 11:24:56.035568 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/deae7c82-4cc7-4bb3-a96b-fd537f41fe89-metrics-certs\") pod \"controller-69bbfbf88f-s6ht6\" (UID: \"deae7c82-4cc7-4bb3-a96b-fd537f41fe89\") " pod="metallb-system/controller-69bbfbf88f-s6ht6" Feb 16 11:24:56 crc kubenswrapper[4949]: I0216 11:24:56.035629 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zwsnk\" (UniqueName: \"kubernetes.io/projected/f007c1dd-b910-41d6-96d9-1642b8eec8c3-kube-api-access-zwsnk\") pod \"speaker-vfbbw\" (UID: \"f007c1dd-b910-41d6-96d9-1642b8eec8c3\") " pod="metallb-system/speaker-vfbbw" Feb 16 11:24:56 crc kubenswrapper[4949]: I0216 11:24:56.035657 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/deae7c82-4cc7-4bb3-a96b-fd537f41fe89-cert\") pod \"controller-69bbfbf88f-s6ht6\" (UID: \"deae7c82-4cc7-4bb3-a96b-fd537f41fe89\") " pod="metallb-system/controller-69bbfbf88f-s6ht6" Feb 16 11:24:56 crc kubenswrapper[4949]: E0216 11:24:56.035845 4949 secret.go:188] Couldn't get secret metallb-system/speaker-certs-secret: secret "speaker-certs-secret" not found Feb 16 11:24:56 crc kubenswrapper[4949]: E0216 11:24:56.035844 4949 secret.go:188] Couldn't get secret metallb-system/controller-certs-secret: secret "controller-certs-secret" not found Feb 16 11:24:56 crc kubenswrapper[4949]: E0216 11:24:56.035989 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f007c1dd-b910-41d6-96d9-1642b8eec8c3-metrics-certs podName:f007c1dd-b910-41d6-96d9-1642b8eec8c3 nodeName:}" failed. No retries permitted until 2026-02-16 11:24:56.535947271 +0000 UTC m=+1086.165281436 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f007c1dd-b910-41d6-96d9-1642b8eec8c3-metrics-certs") pod "speaker-vfbbw" (UID: "f007c1dd-b910-41d6-96d9-1642b8eec8c3") : secret "speaker-certs-secret" not found Feb 16 11:24:56 crc kubenswrapper[4949]: E0216 11:24:56.036031 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/deae7c82-4cc7-4bb3-a96b-fd537f41fe89-metrics-certs podName:deae7c82-4cc7-4bb3-a96b-fd537f41fe89 nodeName:}" failed. No retries permitted until 2026-02-16 11:24:56.536004643 +0000 UTC m=+1086.165338808 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/deae7c82-4cc7-4bb3-a96b-fd537f41fe89-metrics-certs") pod "controller-69bbfbf88f-s6ht6" (UID: "deae7c82-4cc7-4bb3-a96b-fd537f41fe89") : secret "controller-certs-secret" not found Feb 16 11:24:56 crc kubenswrapper[4949]: I0216 11:24:56.037050 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/f007c1dd-b910-41d6-96d9-1642b8eec8c3-metallb-excludel2\") pod \"speaker-vfbbw\" (UID: \"f007c1dd-b910-41d6-96d9-1642b8eec8c3\") " pod="metallb-system/speaker-vfbbw" Feb 16 11:24:56 crc kubenswrapper[4949]: E0216 11:24:56.038117 4949 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Feb 16 11:24:56 crc kubenswrapper[4949]: E0216 11:24:56.038164 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f007c1dd-b910-41d6-96d9-1642b8eec8c3-memberlist podName:f007c1dd-b910-41d6-96d9-1642b8eec8c3 nodeName:}" failed. No retries permitted until 2026-02-16 11:24:56.538154234 +0000 UTC m=+1086.167488399 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/f007c1dd-b910-41d6-96d9-1642b8eec8c3-memberlist") pod "speaker-vfbbw" (UID: "f007c1dd-b910-41d6-96d9-1642b8eec8c3") : secret "metallb-memberlist" not found Feb 16 11:24:56 crc kubenswrapper[4949]: I0216 11:24:56.038497 4949 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Feb 16 11:24:56 crc kubenswrapper[4949]: I0216 11:24:56.050198 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/deae7c82-4cc7-4bb3-a96b-fd537f41fe89-cert\") pod \"controller-69bbfbf88f-s6ht6\" (UID: \"deae7c82-4cc7-4bb3-a96b-fd537f41fe89\") " pod="metallb-system/controller-69bbfbf88f-s6ht6" Feb 16 11:24:56 crc kubenswrapper[4949]: I0216 11:24:56.056110 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g6jsv\" (UniqueName: \"kubernetes.io/projected/deae7c82-4cc7-4bb3-a96b-fd537f41fe89-kube-api-access-g6jsv\") pod \"controller-69bbfbf88f-s6ht6\" (UID: \"deae7c82-4cc7-4bb3-a96b-fd537f41fe89\") " pod="metallb-system/controller-69bbfbf88f-s6ht6" Feb 16 11:24:56 crc kubenswrapper[4949]: I0216 11:24:56.064235 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zwsnk\" (UniqueName: \"kubernetes.io/projected/f007c1dd-b910-41d6-96d9-1642b8eec8c3-kube-api-access-zwsnk\") pod \"speaker-vfbbw\" (UID: \"f007c1dd-b910-41d6-96d9-1642b8eec8c3\") " pod="metallb-system/speaker-vfbbw" Feb 16 11:24:56 crc kubenswrapper[4949]: I0216 11:24:56.240296 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/2fc23a3f-060b-4441-9ed0-b9a6749338be-cert\") pod \"frr-k8s-webhook-server-78b44bf5bb-5tk44\" (UID: \"2fc23a3f-060b-4441-9ed0-b9a6749338be\") " pod="metallb-system/frr-k8s-webhook-server-78b44bf5bb-5tk44" Feb 16 11:24:56 crc kubenswrapper[4949]: I0216 11:24:56.244290 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/2fc23a3f-060b-4441-9ed0-b9a6749338be-cert\") pod \"frr-k8s-webhook-server-78b44bf5bb-5tk44\" (UID: \"2fc23a3f-060b-4441-9ed0-b9a6749338be\") " pod="metallb-system/frr-k8s-webhook-server-78b44bf5bb-5tk44" Feb 16 11:24:56 crc kubenswrapper[4949]: 
I0216 11:24:56.263471 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-knrks" Feb 16 11:24:56 crc kubenswrapper[4949]: I0216 11:24:56.542937 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-78b44bf5bb-5tk44" Feb 16 11:24:56 crc kubenswrapper[4949]: I0216 11:24:56.546549 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/deae7c82-4cc7-4bb3-a96b-fd537f41fe89-metrics-certs\") pod \"controller-69bbfbf88f-s6ht6\" (UID: \"deae7c82-4cc7-4bb3-a96b-fd537f41fe89\") " pod="metallb-system/controller-69bbfbf88f-s6ht6" Feb 16 11:24:56 crc kubenswrapper[4949]: I0216 11:24:56.546619 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f007c1dd-b910-41d6-96d9-1642b8eec8c3-metrics-certs\") pod \"speaker-vfbbw\" (UID: \"f007c1dd-b910-41d6-96d9-1642b8eec8c3\") " pod="metallb-system/speaker-vfbbw" Feb 16 11:24:56 crc kubenswrapper[4949]: I0216 11:24:56.546849 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/f007c1dd-b910-41d6-96d9-1642b8eec8c3-memberlist\") pod \"speaker-vfbbw\" (UID: \"f007c1dd-b910-41d6-96d9-1642b8eec8c3\") " pod="metallb-system/speaker-vfbbw" Feb 16 11:24:56 crc kubenswrapper[4949]: E0216 11:24:56.547027 4949 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Feb 16 11:24:56 crc kubenswrapper[4949]: E0216 11:24:56.547111 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f007c1dd-b910-41d6-96d9-1642b8eec8c3-memberlist podName:f007c1dd-b910-41d6-96d9-1642b8eec8c3 nodeName:}" failed. No retries permitted until 2026-02-16 11:24:57.547091245 +0000 UTC m=+1087.176425410 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/f007c1dd-b910-41d6-96d9-1642b8eec8c3-memberlist") pod "speaker-vfbbw" (UID: "f007c1dd-b910-41d6-96d9-1642b8eec8c3") : secret "metallb-memberlist" not found Feb 16 11:24:56 crc kubenswrapper[4949]: I0216 11:24:56.552127 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f007c1dd-b910-41d6-96d9-1642b8eec8c3-metrics-certs\") pod \"speaker-vfbbw\" (UID: \"f007c1dd-b910-41d6-96d9-1642b8eec8c3\") " pod="metallb-system/speaker-vfbbw" Feb 16 11:24:56 crc kubenswrapper[4949]: I0216 11:24:56.555961 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/deae7c82-4cc7-4bb3-a96b-fd537f41fe89-metrics-certs\") pod \"controller-69bbfbf88f-s6ht6\" (UID: \"deae7c82-4cc7-4bb3-a96b-fd537f41fe89\") " pod="metallb-system/controller-69bbfbf88f-s6ht6" Feb 16 11:24:56 crc kubenswrapper[4949]: I0216 11:24:56.714051 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-69bbfbf88f-s6ht6" Feb 16 11:24:57 crc kubenswrapper[4949]: I0216 11:24:57.097935 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-78b44bf5bb-5tk44"] Feb 16 11:24:57 crc kubenswrapper[4949]: I0216 11:24:57.114695 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-knrks" event={"ID":"e39d2618-f332-4236-9437-77af6dc23e3d","Type":"ContainerStarted","Data":"8d6a79fa484b6946d1c9f29bfafc9595a16b6fce1a88cb915f56beb84780e6d9"} Feb 16 11:24:57 crc kubenswrapper[4949]: I0216 11:24:57.181233 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-69bbfbf88f-s6ht6"] Feb 16 11:24:57 crc kubenswrapper[4949]: I0216 11:24:57.583473 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/f007c1dd-b910-41d6-96d9-1642b8eec8c3-memberlist\") pod \"speaker-vfbbw\" (UID: \"f007c1dd-b910-41d6-96d9-1642b8eec8c3\") " pod="metallb-system/speaker-vfbbw" Feb 16 11:24:57 crc kubenswrapper[4949]: I0216 11:24:57.610069 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/f007c1dd-b910-41d6-96d9-1642b8eec8c3-memberlist\") pod \"speaker-vfbbw\" (UID: \"f007c1dd-b910-41d6-96d9-1642b8eec8c3\") " pod="metallb-system/speaker-vfbbw" Feb 16 11:24:57 crc kubenswrapper[4949]: I0216 11:24:57.904538 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-vfbbw" Feb 16 11:24:57 crc kubenswrapper[4949]: W0216 11:24:57.937736 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf007c1dd_b910_41d6_96d9_1642b8eec8c3.slice/crio-848ac7a253128d741d65ea294354b5b048a47900862835fedaf8bf028a6b6b11 WatchSource:0}: Error finding container 848ac7a253128d741d65ea294354b5b048a47900862835fedaf8bf028a6b6b11: Status 404 returned error can't find the container with id 848ac7a253128d741d65ea294354b5b048a47900862835fedaf8bf028a6b6b11 Feb 16 11:24:58 crc kubenswrapper[4949]: I0216 11:24:58.129701 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-78b44bf5bb-5tk44" event={"ID":"2fc23a3f-060b-4441-9ed0-b9a6749338be","Type":"ContainerStarted","Data":"485edb590de4b5a8bd7b43b1edc1d63c5c8ee3183d1efd74d6843b27fc6c71b2"} Feb 16 11:24:58 crc kubenswrapper[4949]: I0216 11:24:58.130971 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-vfbbw" event={"ID":"f007c1dd-b910-41d6-96d9-1642b8eec8c3","Type":"ContainerStarted","Data":"848ac7a253128d741d65ea294354b5b048a47900862835fedaf8bf028a6b6b11"} Feb 16 11:24:58 crc kubenswrapper[4949]: I0216 11:24:58.138466 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-69bbfbf88f-s6ht6" event={"ID":"deae7c82-4cc7-4bb3-a96b-fd537f41fe89","Type":"ContainerStarted","Data":"5be52253f788c35533a873169b0ce632c5bb64269d2e5e179dc72fd58507433d"} Feb 16 11:24:58 crc kubenswrapper[4949]: I0216 11:24:58.138517 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-69bbfbf88f-s6ht6" event={"ID":"deae7c82-4cc7-4bb3-a96b-fd537f41fe89","Type":"ContainerStarted","Data":"da5dfc58646b89f125b66fe5606b3e5293c8dc6e98974d59bd4cb76f90386070"} Feb 16 11:24:58 crc kubenswrapper[4949]: I0216 11:24:58.138528 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="metallb-system/controller-69bbfbf88f-s6ht6" event={"ID":"deae7c82-4cc7-4bb3-a96b-fd537f41fe89","Type":"ContainerStarted","Data":"2fac9a320bc7ea550b575e51cb827836b44296b4166169a287de172221e796ac"} Feb 16 11:24:58 crc kubenswrapper[4949]: I0216 11:24:58.138660 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-69bbfbf88f-s6ht6" Feb 16 11:24:58 crc kubenswrapper[4949]: I0216 11:24:58.179507 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-69bbfbf88f-s6ht6" podStartSLOduration=3.17948319 podStartE2EDuration="3.17948319s" podCreationTimestamp="2026-02-16 11:24:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:24:58.174788477 +0000 UTC m=+1087.804122642" watchObservedRunningTime="2026-02-16 11:24:58.17948319 +0000 UTC m=+1087.808817355" Feb 16 11:24:59 crc kubenswrapper[4949]: I0216 11:24:59.160073 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-vfbbw" event={"ID":"f007c1dd-b910-41d6-96d9-1642b8eec8c3","Type":"ContainerStarted","Data":"7b88af17a3831d72ea24f2b83f68f2cd36a6eebb4d5426dd94bc06b5d2468647"} Feb 16 11:24:59 crc kubenswrapper[4949]: I0216 11:24:59.160125 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-vfbbw" event={"ID":"f007c1dd-b910-41d6-96d9-1642b8eec8c3","Type":"ContainerStarted","Data":"2956ee3bdc6190286bf2b808482524ba34a0e25812034445d3469210746ed9e3"} Feb 16 11:24:59 crc kubenswrapper[4949]: I0216 11:24:59.160161 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-vfbbw" Feb 16 11:24:59 crc kubenswrapper[4949]: I0216 11:24:59.182653 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-vfbbw" podStartSLOduration=4.182614444 podStartE2EDuration="4.182614444s" podCreationTimestamp="2026-02-16 11:24:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:24:59.176913162 +0000 UTC m=+1088.806247337" watchObservedRunningTime="2026-02-16 11:24:59.182614444 +0000 UTC m=+1088.811948639" Feb 16 11:25:05 crc kubenswrapper[4949]: I0216 11:25:05.231435 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-78b44bf5bb-5tk44" event={"ID":"2fc23a3f-060b-4441-9ed0-b9a6749338be","Type":"ContainerStarted","Data":"7ee005816d7648d85a3ad14dbfb78c290bb52daae75e6eff6d78bb8c6f5fcdef"} Feb 16 11:25:05 crc kubenswrapper[4949]: I0216 11:25:05.232570 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-78b44bf5bb-5tk44" Feb 16 11:25:05 crc kubenswrapper[4949]: I0216 11:25:05.235191 4949 generic.go:334] "Generic (PLEG): container finished" podID="e39d2618-f332-4236-9437-77af6dc23e3d" containerID="7b89f7b3028c02f2e4a2da4b443a72c9ed4c883eadfa542274dcc1df241d5ee3" exitCode=0 Feb 16 11:25:05 crc kubenswrapper[4949]: I0216 11:25:05.269306 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-78b44bf5bb-5tk44" podStartSLOduration=2.773689692 podStartE2EDuration="10.269278683s" podCreationTimestamp="2026-02-16 11:24:55 +0000 UTC" firstStartedPulling="2026-02-16 11:24:57.114402816 +0000 UTC m=+1086.743736981" lastFinishedPulling="2026-02-16 11:25:04.609991807 +0000 UTC m=+1094.239325972" 
observedRunningTime="2026-02-16 11:25:05.255434119 +0000 UTC m=+1094.884768304" watchObservedRunningTime="2026-02-16 11:25:05.269278683 +0000 UTC m=+1094.898612848" Feb 16 11:25:05 crc kubenswrapper[4949]: I0216 11:25:05.269468 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-knrks" event={"ID":"e39d2618-f332-4236-9437-77af6dc23e3d","Type":"ContainerDied","Data":"7b89f7b3028c02f2e4a2da4b443a72c9ed4c883eadfa542274dcc1df241d5ee3"} Feb 16 11:25:06 crc kubenswrapper[4949]: I0216 11:25:06.247759 4949 generic.go:334] "Generic (PLEG): container finished" podID="e39d2618-f332-4236-9437-77af6dc23e3d" containerID="ce0388c1a6e24884c68acdacc8c13fd4ffb78fb1103e3958dddd8b6b24abd969" exitCode=0 Feb 16 11:25:06 crc kubenswrapper[4949]: I0216 11:25:06.249447 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-knrks" event={"ID":"e39d2618-f332-4236-9437-77af6dc23e3d","Type":"ContainerDied","Data":"ce0388c1a6e24884c68acdacc8c13fd4ffb78fb1103e3958dddd8b6b24abd969"} Feb 16 11:25:07 crc kubenswrapper[4949]: I0216 11:25:07.258421 4949 generic.go:334] "Generic (PLEG): container finished" podID="e39d2618-f332-4236-9437-77af6dc23e3d" containerID="5c03d1a3e8a6e1d576c6e76e1c70a5a5d64ee8394b13a927175cc0fbfff65231" exitCode=0 Feb 16 11:25:07 crc kubenswrapper[4949]: I0216 11:25:07.258506 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-knrks" event={"ID":"e39d2618-f332-4236-9437-77af6dc23e3d","Type":"ContainerDied","Data":"5c03d1a3e8a6e1d576c6e76e1c70a5a5d64ee8394b13a927175cc0fbfff65231"} Feb 16 11:25:08 crc kubenswrapper[4949]: I0216 11:25:08.277504 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-knrks" event={"ID":"e39d2618-f332-4236-9437-77af6dc23e3d","Type":"ContainerStarted","Data":"de91e1d40ef66e0fea0b8c06b4d63ff1da70b9b02cefbebfefd52cc7ba864c8a"} Feb 16 11:25:08 crc kubenswrapper[4949]: I0216 11:25:08.277919 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-knrks" event={"ID":"e39d2618-f332-4236-9437-77af6dc23e3d","Type":"ContainerStarted","Data":"a6ba890c25d4a6436dd480c9c5fc7ee8a8dbd2e69a6f2a4b2d9b72b493c1d9a5"} Feb 16 11:25:08 crc kubenswrapper[4949]: I0216 11:25:08.277933 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-knrks" event={"ID":"e39d2618-f332-4236-9437-77af6dc23e3d","Type":"ContainerStarted","Data":"9dc381583c09e3a9ecd6030718e33eff1406de57dff35387901ea4bbeb21c3bb"} Feb 16 11:25:08 crc kubenswrapper[4949]: I0216 11:25:08.277944 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-knrks" event={"ID":"e39d2618-f332-4236-9437-77af6dc23e3d","Type":"ContainerStarted","Data":"ed7f702af566e7d5f070212987a7adf8aa392ed5551da4fc11dec5ce54762387"} Feb 16 11:25:08 crc kubenswrapper[4949]: I0216 11:25:08.277956 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-knrks" event={"ID":"e39d2618-f332-4236-9437-77af6dc23e3d","Type":"ContainerStarted","Data":"6b6ac96e497f0dcc0099cdb2168bbf00725fb9896d7da113df691f4bc2a8ba55"} Feb 16 11:25:09 crc kubenswrapper[4949]: I0216 11:25:09.298686 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-knrks" event={"ID":"e39d2618-f332-4236-9437-77af6dc23e3d","Type":"ContainerStarted","Data":"31d26a48b01a544c2db070d773cfce22e46b777881628a34ba09742a5e976f25"} Feb 16 11:25:09 crc kubenswrapper[4949]: I0216 11:25:09.299447 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="metallb-system/frr-k8s-knrks" Feb 16 11:25:09 crc kubenswrapper[4949]: I0216 11:25:09.334878 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-knrks" podStartSLOduration=6.099290402 podStartE2EDuration="14.334848734s" podCreationTimestamp="2026-02-16 11:24:55 +0000 UTC" firstStartedPulling="2026-02-16 11:24:56.398644054 +0000 UTC m=+1086.027978219" lastFinishedPulling="2026-02-16 11:25:04.634202386 +0000 UTC m=+1094.263536551" observedRunningTime="2026-02-16 11:25:09.331973872 +0000 UTC m=+1098.961308037" watchObservedRunningTime="2026-02-16 11:25:09.334848734 +0000 UTC m=+1098.964182899" Feb 16 11:25:11 crc kubenswrapper[4949]: I0216 11:25:11.264781 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-knrks" Feb 16 11:25:11 crc kubenswrapper[4949]: I0216 11:25:11.306909 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-knrks" Feb 16 11:25:16 crc kubenswrapper[4949]: I0216 11:25:16.549534 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-78b44bf5bb-5tk44" Feb 16 11:25:16 crc kubenswrapper[4949]: I0216 11:25:16.720442 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-69bbfbf88f-s6ht6" Feb 16 11:25:17 crc kubenswrapper[4949]: I0216 11:25:17.910579 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-vfbbw" Feb 16 11:25:20 crc kubenswrapper[4949]: I0216 11:25:20.487643 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-v94vp"] Feb 16 11:25:20 crc kubenswrapper[4949]: I0216 11:25:20.490119 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-v94vp" Feb 16 11:25:20 crc kubenswrapper[4949]: I0216 11:25:20.499536 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Feb 16 11:25:20 crc kubenswrapper[4949]: I0216 11:25:20.499553 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Feb 16 11:25:20 crc kubenswrapper[4949]: I0216 11:25:20.499858 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-l8p6b" Feb 16 11:25:20 crc kubenswrapper[4949]: I0216 11:25:20.528415 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-v94vp"] Feb 16 11:25:20 crc kubenswrapper[4949]: I0216 11:25:20.611926 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-smnwp\" (UniqueName: \"kubernetes.io/projected/43715d15-1950-411d-a940-ad1359dcf97c-kube-api-access-smnwp\") pod \"openstack-operator-index-v94vp\" (UID: \"43715d15-1950-411d-a940-ad1359dcf97c\") " pod="openstack-operators/openstack-operator-index-v94vp" Feb 16 11:25:20 crc kubenswrapper[4949]: I0216 11:25:20.713975 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-smnwp\" (UniqueName: \"kubernetes.io/projected/43715d15-1950-411d-a940-ad1359dcf97c-kube-api-access-smnwp\") pod \"openstack-operator-index-v94vp\" (UID: \"43715d15-1950-411d-a940-ad1359dcf97c\") " pod="openstack-operators/openstack-operator-index-v94vp" Feb 16 11:25:20 crc kubenswrapper[4949]: I0216 11:25:20.745419 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-smnwp\" (UniqueName: \"kubernetes.io/projected/43715d15-1950-411d-a940-ad1359dcf97c-kube-api-access-smnwp\") pod \"openstack-operator-index-v94vp\" (UID: \"43715d15-1950-411d-a940-ad1359dcf97c\") " pod="openstack-operators/openstack-operator-index-v94vp" Feb 16 11:25:20 crc kubenswrapper[4949]: I0216 11:25:20.817667 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-v94vp" Feb 16 11:25:21 crc kubenswrapper[4949]: I0216 11:25:21.293053 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-v94vp"] Feb 16 11:25:21 crc kubenswrapper[4949]: W0216 11:25:21.299434 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod43715d15_1950_411d_a940_ad1359dcf97c.slice/crio-94ca2bfe918f70abda6859b5fe62d5a30b00774e0221b707b9d2b937bdf60d06 WatchSource:0}: Error finding container 94ca2bfe918f70abda6859b5fe62d5a30b00774e0221b707b9d2b937bdf60d06: Status 404 returned error can't find the container with id 94ca2bfe918f70abda6859b5fe62d5a30b00774e0221b707b9d2b937bdf60d06 Feb 16 11:25:21 crc kubenswrapper[4949]: I0216 11:25:21.423047 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-v94vp" event={"ID":"43715d15-1950-411d-a940-ad1359dcf97c","Type":"ContainerStarted","Data":"94ca2bfe918f70abda6859b5fe62d5a30b00774e0221b707b9d2b937bdf60d06"} Feb 16 11:25:23 crc kubenswrapper[4949]: I0216 11:25:23.834788 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-v94vp"] Feb 16 11:25:24 crc kubenswrapper[4949]: I0216 11:25:24.439124 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-llhm4"] Feb 16 11:25:24 crc kubenswrapper[4949]: I0216 11:25:24.440560 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-llhm4" Feb 16 11:25:24 crc kubenswrapper[4949]: I0216 11:25:24.453135 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-llhm4"] Feb 16 11:25:24 crc kubenswrapper[4949]: I0216 11:25:24.496404 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ztkr7\" (UniqueName: \"kubernetes.io/projected/dd48c3ca-bfee-4c5d-8ddf-46e04dd300a9-kube-api-access-ztkr7\") pod \"openstack-operator-index-llhm4\" (UID: \"dd48c3ca-bfee-4c5d-8ddf-46e04dd300a9\") " pod="openstack-operators/openstack-operator-index-llhm4" Feb 16 11:25:24 crc kubenswrapper[4949]: I0216 11:25:24.598566 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ztkr7\" (UniqueName: \"kubernetes.io/projected/dd48c3ca-bfee-4c5d-8ddf-46e04dd300a9-kube-api-access-ztkr7\") pod \"openstack-operator-index-llhm4\" (UID: \"dd48c3ca-bfee-4c5d-8ddf-46e04dd300a9\") " pod="openstack-operators/openstack-operator-index-llhm4" Feb 16 11:25:24 crc kubenswrapper[4949]: I0216 11:25:24.629016 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ztkr7\" (UniqueName: \"kubernetes.io/projected/dd48c3ca-bfee-4c5d-8ddf-46e04dd300a9-kube-api-access-ztkr7\") pod \"openstack-operator-index-llhm4\" (UID: \"dd48c3ca-bfee-4c5d-8ddf-46e04dd300a9\") " pod="openstack-operators/openstack-operator-index-llhm4" Feb 16 11:25:24 crc kubenswrapper[4949]: I0216 11:25:24.769738 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-llhm4" Feb 16 11:25:25 crc kubenswrapper[4949]: I0216 11:25:25.284600 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-llhm4"] Feb 16 11:25:25 crc kubenswrapper[4949]: I0216 11:25:25.464550 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-v94vp" event={"ID":"43715d15-1950-411d-a940-ad1359dcf97c","Type":"ContainerStarted","Data":"5a0df2ce2ccb17e805b7a5e158b90b141844f93e4224e555df607e0381d03f8e"} Feb 16 11:25:25 crc kubenswrapper[4949]: I0216 11:25:25.464673 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-v94vp" podUID="43715d15-1950-411d-a940-ad1359dcf97c" containerName="registry-server" containerID="cri-o://5a0df2ce2ccb17e805b7a5e158b90b141844f93e4224e555df607e0381d03f8e" gracePeriod=2 Feb 16 11:25:25 crc kubenswrapper[4949]: I0216 11:25:25.469817 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-llhm4" event={"ID":"dd48c3ca-bfee-4c5d-8ddf-46e04dd300a9","Type":"ContainerStarted","Data":"a92cfa32b7cff5e3d844c6c51b2c78a879dff7e4df6a8752565b513188f6fb7c"} Feb 16 11:25:25 crc kubenswrapper[4949]: I0216 11:25:25.500087 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-v94vp" podStartSLOduration=2.3362881509999998 podStartE2EDuration="5.500050022s" podCreationTimestamp="2026-02-16 11:25:20 +0000 UTC" firstStartedPulling="2026-02-16 11:25:21.301609344 +0000 UTC m=+1110.930943519" lastFinishedPulling="2026-02-16 11:25:24.465371225 +0000 UTC m=+1114.094705390" observedRunningTime="2026-02-16 11:25:25.493380843 +0000 UTC m=+1115.122715008" watchObservedRunningTime="2026-02-16 11:25:25.500050022 +0000 UTC m=+1115.129384207" Feb 16 11:25:25 crc kubenswrapper[4949]: I0216 11:25:25.964510 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-v94vp" Feb 16 11:25:26 crc kubenswrapper[4949]: I0216 11:25:26.026423 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-smnwp\" (UniqueName: \"kubernetes.io/projected/43715d15-1950-411d-a940-ad1359dcf97c-kube-api-access-smnwp\") pod \"43715d15-1950-411d-a940-ad1359dcf97c\" (UID: \"43715d15-1950-411d-a940-ad1359dcf97c\") " Feb 16 11:25:26 crc kubenswrapper[4949]: I0216 11:25:26.033700 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43715d15-1950-411d-a940-ad1359dcf97c-kube-api-access-smnwp" (OuterVolumeSpecName: "kube-api-access-smnwp") pod "43715d15-1950-411d-a940-ad1359dcf97c" (UID: "43715d15-1950-411d-a940-ad1359dcf97c"). InnerVolumeSpecName "kube-api-access-smnwp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:25:26 crc kubenswrapper[4949]: I0216 11:25:26.128850 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-smnwp\" (UniqueName: \"kubernetes.io/projected/43715d15-1950-411d-a940-ad1359dcf97c-kube-api-access-smnwp\") on node \"crc\" DevicePath \"\"" Feb 16 11:25:26 crc kubenswrapper[4949]: I0216 11:25:26.267726 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-knrks" Feb 16 11:25:26 crc kubenswrapper[4949]: I0216 11:25:26.482845 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-llhm4" event={"ID":"dd48c3ca-bfee-4c5d-8ddf-46e04dd300a9","Type":"ContainerStarted","Data":"3cd27395c0db44e0dd330359e0475affe4d79ed0a96d8a0912d6a75738977aa1"} Feb 16 11:25:26 crc kubenswrapper[4949]: I0216 11:25:26.492881 4949 generic.go:334] "Generic (PLEG): container finished" podID="43715d15-1950-411d-a940-ad1359dcf97c" containerID="5a0df2ce2ccb17e805b7a5e158b90b141844f93e4224e555df607e0381d03f8e" exitCode=0 Feb 16 11:25:26 crc kubenswrapper[4949]: I0216 11:25:26.492983 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-v94vp" event={"ID":"43715d15-1950-411d-a940-ad1359dcf97c","Type":"ContainerDied","Data":"5a0df2ce2ccb17e805b7a5e158b90b141844f93e4224e555df607e0381d03f8e"} Feb 16 11:25:26 crc kubenswrapper[4949]: I0216 11:25:26.493078 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-v94vp" event={"ID":"43715d15-1950-411d-a940-ad1359dcf97c","Type":"ContainerDied","Data":"94ca2bfe918f70abda6859b5fe62d5a30b00774e0221b707b9d2b937bdf60d06"} Feb 16 11:25:26 crc kubenswrapper[4949]: I0216 11:25:26.493121 4949 scope.go:117] "RemoveContainer" containerID="5a0df2ce2ccb17e805b7a5e158b90b141844f93e4224e555df607e0381d03f8e" Feb 16 11:25:26 crc kubenswrapper[4949]: I0216 11:25:26.493640 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-v94vp" Feb 16 11:25:26 crc kubenswrapper[4949]: I0216 11:25:26.524804 4949 scope.go:117] "RemoveContainer" containerID="5a0df2ce2ccb17e805b7a5e158b90b141844f93e4224e555df607e0381d03f8e" Feb 16 11:25:26 crc kubenswrapper[4949]: E0216 11:25:26.526115 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5a0df2ce2ccb17e805b7a5e158b90b141844f93e4224e555df607e0381d03f8e\": container with ID starting with 5a0df2ce2ccb17e805b7a5e158b90b141844f93e4224e555df607e0381d03f8e not found: ID does not exist" containerID="5a0df2ce2ccb17e805b7a5e158b90b141844f93e4224e555df607e0381d03f8e" Feb 16 11:25:26 crc kubenswrapper[4949]: I0216 11:25:26.526202 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5a0df2ce2ccb17e805b7a5e158b90b141844f93e4224e555df607e0381d03f8e"} err="failed to get container status \"5a0df2ce2ccb17e805b7a5e158b90b141844f93e4224e555df607e0381d03f8e\": rpc error: code = NotFound desc = could not find container \"5a0df2ce2ccb17e805b7a5e158b90b141844f93e4224e555df607e0381d03f8e\": container with ID starting with 5a0df2ce2ccb17e805b7a5e158b90b141844f93e4224e555df607e0381d03f8e not found: ID does not exist" Feb 16 11:25:26 crc kubenswrapper[4949]: I0216 11:25:26.527635 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-llhm4" podStartSLOduration=2.471461569 podStartE2EDuration="2.527614167s" podCreationTimestamp="2026-02-16 11:25:24 +0000 UTC" firstStartedPulling="2026-02-16 11:25:25.302828261 +0000 UTC m=+1114.932162436" lastFinishedPulling="2026-02-16 11:25:25.358980839 +0000 UTC m=+1114.988315034" observedRunningTime="2026-02-16 11:25:26.515448051 +0000 UTC m=+1116.144782226" watchObservedRunningTime="2026-02-16 11:25:26.527614167 +0000 UTC m=+1116.156948352" Feb 16 11:25:26 crc kubenswrapper[4949]: I0216 11:25:26.556158 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-v94vp"] Feb 16 11:25:26 crc kubenswrapper[4949]: I0216 11:25:26.564701 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-v94vp"] Feb 16 11:25:27 crc kubenswrapper[4949]: I0216 11:25:27.246705 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43715d15-1950-411d-a940-ad1359dcf97c" path="/var/lib/kubelet/pods/43715d15-1950-411d-a940-ad1359dcf97c/volumes" Feb 16 11:25:34 crc kubenswrapper[4949]: I0216 11:25:34.770879 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-llhm4" Feb 16 11:25:34 crc kubenswrapper[4949]: I0216 11:25:34.771933 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-llhm4" Feb 16 11:25:34 crc kubenswrapper[4949]: I0216 11:25:34.819538 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-llhm4" Feb 16 11:25:35 crc kubenswrapper[4949]: I0216 11:25:35.620480 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-llhm4" Feb 16 11:25:43 crc kubenswrapper[4949]: I0216 11:25:43.608988 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/fb8767c25a457251b2669501481e586de5c4c83792e0dec9bfa5ebbd13bljwv"] Feb 16 11:25:43 
crc kubenswrapper[4949]: E0216 11:25:43.610332 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43715d15-1950-411d-a940-ad1359dcf97c" containerName="registry-server" Feb 16 11:25:43 crc kubenswrapper[4949]: I0216 11:25:43.610354 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="43715d15-1950-411d-a940-ad1359dcf97c" containerName="registry-server" Feb 16 11:25:43 crc kubenswrapper[4949]: I0216 11:25:43.610583 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="43715d15-1950-411d-a940-ad1359dcf97c" containerName="registry-server" Feb 16 11:25:43 crc kubenswrapper[4949]: I0216 11:25:43.612130 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/fb8767c25a457251b2669501481e586de5c4c83792e0dec9bfa5ebbd13bljwv" Feb 16 11:25:43 crc kubenswrapper[4949]: I0216 11:25:43.614346 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-wr6zm" Feb 16 11:25:43 crc kubenswrapper[4949]: I0216 11:25:43.621223 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/fb8767c25a457251b2669501481e586de5c4c83792e0dec9bfa5ebbd13bljwv"] Feb 16 11:25:43 crc kubenswrapper[4949]: I0216 11:25:43.688363 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c531de36-0700-4513-84c9-ba4da7b9afde-util\") pod \"fb8767c25a457251b2669501481e586de5c4c83792e0dec9bfa5ebbd13bljwv\" (UID: \"c531de36-0700-4513-84c9-ba4da7b9afde\") " pod="openstack-operators/fb8767c25a457251b2669501481e586de5c4c83792e0dec9bfa5ebbd13bljwv" Feb 16 11:25:43 crc kubenswrapper[4949]: I0216 11:25:43.688444 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pttnl\" (UniqueName: \"kubernetes.io/projected/c531de36-0700-4513-84c9-ba4da7b9afde-kube-api-access-pttnl\") pod \"fb8767c25a457251b2669501481e586de5c4c83792e0dec9bfa5ebbd13bljwv\" (UID: \"c531de36-0700-4513-84c9-ba4da7b9afde\") " pod="openstack-operators/fb8767c25a457251b2669501481e586de5c4c83792e0dec9bfa5ebbd13bljwv" Feb 16 11:25:43 crc kubenswrapper[4949]: I0216 11:25:43.688556 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c531de36-0700-4513-84c9-ba4da7b9afde-bundle\") pod \"fb8767c25a457251b2669501481e586de5c4c83792e0dec9bfa5ebbd13bljwv\" (UID: \"c531de36-0700-4513-84c9-ba4da7b9afde\") " pod="openstack-operators/fb8767c25a457251b2669501481e586de5c4c83792e0dec9bfa5ebbd13bljwv" Feb 16 11:25:43 crc kubenswrapper[4949]: I0216 11:25:43.789819 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c531de36-0700-4513-84c9-ba4da7b9afde-util\") pod \"fb8767c25a457251b2669501481e586de5c4c83792e0dec9bfa5ebbd13bljwv\" (UID: \"c531de36-0700-4513-84c9-ba4da7b9afde\") " pod="openstack-operators/fb8767c25a457251b2669501481e586de5c4c83792e0dec9bfa5ebbd13bljwv" Feb 16 11:25:43 crc kubenswrapper[4949]: I0216 11:25:43.789897 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pttnl\" (UniqueName: \"kubernetes.io/projected/c531de36-0700-4513-84c9-ba4da7b9afde-kube-api-access-pttnl\") pod \"fb8767c25a457251b2669501481e586de5c4c83792e0dec9bfa5ebbd13bljwv\" (UID: \"c531de36-0700-4513-84c9-ba4da7b9afde\") " 
pod="openstack-operators/fb8767c25a457251b2669501481e586de5c4c83792e0dec9bfa5ebbd13bljwv" Feb 16 11:25:43 crc kubenswrapper[4949]: I0216 11:25:43.790027 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c531de36-0700-4513-84c9-ba4da7b9afde-bundle\") pod \"fb8767c25a457251b2669501481e586de5c4c83792e0dec9bfa5ebbd13bljwv\" (UID: \"c531de36-0700-4513-84c9-ba4da7b9afde\") " pod="openstack-operators/fb8767c25a457251b2669501481e586de5c4c83792e0dec9bfa5ebbd13bljwv" Feb 16 11:25:43 crc kubenswrapper[4949]: I0216 11:25:43.790604 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c531de36-0700-4513-84c9-ba4da7b9afde-util\") pod \"fb8767c25a457251b2669501481e586de5c4c83792e0dec9bfa5ebbd13bljwv\" (UID: \"c531de36-0700-4513-84c9-ba4da7b9afde\") " pod="openstack-operators/fb8767c25a457251b2669501481e586de5c4c83792e0dec9bfa5ebbd13bljwv" Feb 16 11:25:43 crc kubenswrapper[4949]: I0216 11:25:43.790687 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c531de36-0700-4513-84c9-ba4da7b9afde-bundle\") pod \"fb8767c25a457251b2669501481e586de5c4c83792e0dec9bfa5ebbd13bljwv\" (UID: \"c531de36-0700-4513-84c9-ba4da7b9afde\") " pod="openstack-operators/fb8767c25a457251b2669501481e586de5c4c83792e0dec9bfa5ebbd13bljwv" Feb 16 11:25:43 crc kubenswrapper[4949]: I0216 11:25:43.812420 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pttnl\" (UniqueName: \"kubernetes.io/projected/c531de36-0700-4513-84c9-ba4da7b9afde-kube-api-access-pttnl\") pod \"fb8767c25a457251b2669501481e586de5c4c83792e0dec9bfa5ebbd13bljwv\" (UID: \"c531de36-0700-4513-84c9-ba4da7b9afde\") " pod="openstack-operators/fb8767c25a457251b2669501481e586de5c4c83792e0dec9bfa5ebbd13bljwv" Feb 16 11:25:43 crc kubenswrapper[4949]: I0216 11:25:43.936373 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/fb8767c25a457251b2669501481e586de5c4c83792e0dec9bfa5ebbd13bljwv" Feb 16 11:25:44 crc kubenswrapper[4949]: I0216 11:25:44.446863 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/fb8767c25a457251b2669501481e586de5c4c83792e0dec9bfa5ebbd13bljwv"] Feb 16 11:25:44 crc kubenswrapper[4949]: W0216 11:25:44.449310 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc531de36_0700_4513_84c9_ba4da7b9afde.slice/crio-137909186c06dd8cccb2ff77729875abcdea5aca7f433da75549ef21b35c2174 WatchSource:0}: Error finding container 137909186c06dd8cccb2ff77729875abcdea5aca7f433da75549ef21b35c2174: Status 404 returned error can't find the container with id 137909186c06dd8cccb2ff77729875abcdea5aca7f433da75549ef21b35c2174 Feb 16 11:25:44 crc kubenswrapper[4949]: I0216 11:25:44.671218 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/fb8767c25a457251b2669501481e586de5c4c83792e0dec9bfa5ebbd13bljwv" event={"ID":"c531de36-0700-4513-84c9-ba4da7b9afde","Type":"ContainerStarted","Data":"b8b7f24b76a6d24c69290d71b6aee58587dad495a242694eb014c2090ff9b11b"} Feb 16 11:25:44 crc kubenswrapper[4949]: I0216 11:25:44.671278 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/fb8767c25a457251b2669501481e586de5c4c83792e0dec9bfa5ebbd13bljwv" event={"ID":"c531de36-0700-4513-84c9-ba4da7b9afde","Type":"ContainerStarted","Data":"137909186c06dd8cccb2ff77729875abcdea5aca7f433da75549ef21b35c2174"} Feb 16 11:25:45 crc kubenswrapper[4949]: I0216 11:25:45.685504 4949 generic.go:334] "Generic (PLEG): container finished" podID="c531de36-0700-4513-84c9-ba4da7b9afde" containerID="b8b7f24b76a6d24c69290d71b6aee58587dad495a242694eb014c2090ff9b11b" exitCode=0 Feb 16 11:25:45 crc kubenswrapper[4949]: I0216 11:25:45.686041 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/fb8767c25a457251b2669501481e586de5c4c83792e0dec9bfa5ebbd13bljwv" event={"ID":"c531de36-0700-4513-84c9-ba4da7b9afde","Type":"ContainerDied","Data":"b8b7f24b76a6d24c69290d71b6aee58587dad495a242694eb014c2090ff9b11b"} Feb 16 11:25:47 crc kubenswrapper[4949]: I0216 11:25:47.709252 4949 generic.go:334] "Generic (PLEG): container finished" podID="c531de36-0700-4513-84c9-ba4da7b9afde" containerID="01fb367d3398c484035b85bf5c51a9a458e4d5db9f643597378173997e4e3ae7" exitCode=0 Feb 16 11:25:47 crc kubenswrapper[4949]: I0216 11:25:47.709390 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/fb8767c25a457251b2669501481e586de5c4c83792e0dec9bfa5ebbd13bljwv" event={"ID":"c531de36-0700-4513-84c9-ba4da7b9afde","Type":"ContainerDied","Data":"01fb367d3398c484035b85bf5c51a9a458e4d5db9f643597378173997e4e3ae7"} Feb 16 11:25:48 crc kubenswrapper[4949]: I0216 11:25:48.722523 4949 generic.go:334] "Generic (PLEG): container finished" podID="c531de36-0700-4513-84c9-ba4da7b9afde" containerID="70a4224875b224a612beda6404803ec6ed2a2b498827d8127fd0126eef46b9d4" exitCode=0 Feb 16 11:25:48 crc kubenswrapper[4949]: I0216 11:25:48.722615 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/fb8767c25a457251b2669501481e586de5c4c83792e0dec9bfa5ebbd13bljwv" event={"ID":"c531de36-0700-4513-84c9-ba4da7b9afde","Type":"ContainerDied","Data":"70a4224875b224a612beda6404803ec6ed2a2b498827d8127fd0126eef46b9d4"} Feb 16 11:25:50 crc kubenswrapper[4949]: I0216 11:25:50.402611 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/fb8767c25a457251b2669501481e586de5c4c83792e0dec9bfa5ebbd13bljwv" Feb 16 11:25:50 crc kubenswrapper[4949]: I0216 11:25:50.530750 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c531de36-0700-4513-84c9-ba4da7b9afde-util\") pod \"c531de36-0700-4513-84c9-ba4da7b9afde\" (UID: \"c531de36-0700-4513-84c9-ba4da7b9afde\") " Feb 16 11:25:50 crc kubenswrapper[4949]: I0216 11:25:50.530819 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pttnl\" (UniqueName: \"kubernetes.io/projected/c531de36-0700-4513-84c9-ba4da7b9afde-kube-api-access-pttnl\") pod \"c531de36-0700-4513-84c9-ba4da7b9afde\" (UID: \"c531de36-0700-4513-84c9-ba4da7b9afde\") " Feb 16 11:25:50 crc kubenswrapper[4949]: I0216 11:25:50.530964 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c531de36-0700-4513-84c9-ba4da7b9afde-bundle\") pod \"c531de36-0700-4513-84c9-ba4da7b9afde\" (UID: \"c531de36-0700-4513-84c9-ba4da7b9afde\") " Feb 16 11:25:50 crc kubenswrapper[4949]: I0216 11:25:50.531945 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c531de36-0700-4513-84c9-ba4da7b9afde-bundle" (OuterVolumeSpecName: "bundle") pod "c531de36-0700-4513-84c9-ba4da7b9afde" (UID: "c531de36-0700-4513-84c9-ba4da7b9afde"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:25:50 crc kubenswrapper[4949]: I0216 11:25:50.538644 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c531de36-0700-4513-84c9-ba4da7b9afde-kube-api-access-pttnl" (OuterVolumeSpecName: "kube-api-access-pttnl") pod "c531de36-0700-4513-84c9-ba4da7b9afde" (UID: "c531de36-0700-4513-84c9-ba4da7b9afde"). InnerVolumeSpecName "kube-api-access-pttnl". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:25:50 crc kubenswrapper[4949]: I0216 11:25:50.554685 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c531de36-0700-4513-84c9-ba4da7b9afde-util" (OuterVolumeSpecName: "util") pod "c531de36-0700-4513-84c9-ba4da7b9afde" (UID: "c531de36-0700-4513-84c9-ba4da7b9afde"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:25:50 crc kubenswrapper[4949]: I0216 11:25:50.633101 4949 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c531de36-0700-4513-84c9-ba4da7b9afde-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:25:50 crc kubenswrapper[4949]: I0216 11:25:50.633141 4949 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c531de36-0700-4513-84c9-ba4da7b9afde-util\") on node \"crc\" DevicePath \"\"" Feb 16 11:25:50 crc kubenswrapper[4949]: I0216 11:25:50.633152 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pttnl\" (UniqueName: \"kubernetes.io/projected/c531de36-0700-4513-84c9-ba4da7b9afde-kube-api-access-pttnl\") on node \"crc\" DevicePath \"\"" Feb 16 11:25:50 crc kubenswrapper[4949]: I0216 11:25:50.744850 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/fb8767c25a457251b2669501481e586de5c4c83792e0dec9bfa5ebbd13bljwv" event={"ID":"c531de36-0700-4513-84c9-ba4da7b9afde","Type":"ContainerDied","Data":"137909186c06dd8cccb2ff77729875abcdea5aca7f433da75549ef21b35c2174"} Feb 16 11:25:50 crc kubenswrapper[4949]: I0216 11:25:50.744892 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="137909186c06dd8cccb2ff77729875abcdea5aca7f433da75549ef21b35c2174" Feb 16 11:25:50 crc kubenswrapper[4949]: I0216 11:25:50.744956 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/fb8767c25a457251b2669501481e586de5c4c83792e0dec9bfa5ebbd13bljwv" Feb 16 11:25:56 crc kubenswrapper[4949]: I0216 11:25:56.277159 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-init-55dffc8d68-xs4j5"] Feb 16 11:25:56 crc kubenswrapper[4949]: E0216 11:25:56.278377 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c531de36-0700-4513-84c9-ba4da7b9afde" containerName="extract" Feb 16 11:25:56 crc kubenswrapper[4949]: I0216 11:25:56.278396 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="c531de36-0700-4513-84c9-ba4da7b9afde" containerName="extract" Feb 16 11:25:56 crc kubenswrapper[4949]: E0216 11:25:56.278456 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c531de36-0700-4513-84c9-ba4da7b9afde" containerName="util" Feb 16 11:25:56 crc kubenswrapper[4949]: I0216 11:25:56.278463 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="c531de36-0700-4513-84c9-ba4da7b9afde" containerName="util" Feb 16 11:25:56 crc kubenswrapper[4949]: E0216 11:25:56.278472 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c531de36-0700-4513-84c9-ba4da7b9afde" containerName="pull" Feb 16 11:25:56 crc kubenswrapper[4949]: I0216 11:25:56.278479 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="c531de36-0700-4513-84c9-ba4da7b9afde" containerName="pull" Feb 16 11:25:56 crc kubenswrapper[4949]: I0216 11:25:56.278657 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="c531de36-0700-4513-84c9-ba4da7b9afde" containerName="extract" Feb 16 11:25:56 crc kubenswrapper[4949]: I0216 11:25:56.279406 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-init-55dffc8d68-xs4j5" Feb 16 11:25:56 crc kubenswrapper[4949]: I0216 11:25:56.281437 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-init-dockercfg-qglzl" Feb 16 11:25:56 crc kubenswrapper[4949]: I0216 11:25:56.327202 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-init-55dffc8d68-xs4j5"] Feb 16 11:25:56 crc kubenswrapper[4949]: I0216 11:25:56.344515 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j67h6\" (UniqueName: \"kubernetes.io/projected/af6b2570-6c23-4589-b650-e3a7db2cb482-kube-api-access-j67h6\") pod \"openstack-operator-controller-init-55dffc8d68-xs4j5\" (UID: \"af6b2570-6c23-4589-b650-e3a7db2cb482\") " pod="openstack-operators/openstack-operator-controller-init-55dffc8d68-xs4j5" Feb 16 11:25:56 crc kubenswrapper[4949]: I0216 11:25:56.446596 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j67h6\" (UniqueName: \"kubernetes.io/projected/af6b2570-6c23-4589-b650-e3a7db2cb482-kube-api-access-j67h6\") pod \"openstack-operator-controller-init-55dffc8d68-xs4j5\" (UID: \"af6b2570-6c23-4589-b650-e3a7db2cb482\") " pod="openstack-operators/openstack-operator-controller-init-55dffc8d68-xs4j5" Feb 16 11:25:56 crc kubenswrapper[4949]: I0216 11:25:56.466623 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j67h6\" (UniqueName: \"kubernetes.io/projected/af6b2570-6c23-4589-b650-e3a7db2cb482-kube-api-access-j67h6\") pod \"openstack-operator-controller-init-55dffc8d68-xs4j5\" (UID: \"af6b2570-6c23-4589-b650-e3a7db2cb482\") " pod="openstack-operators/openstack-operator-controller-init-55dffc8d68-xs4j5" Feb 16 11:25:56 crc kubenswrapper[4949]: I0216 11:25:56.605102 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-init-55dffc8d68-xs4j5" Feb 16 11:25:57 crc kubenswrapper[4949]: I0216 11:25:57.161679 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-init-55dffc8d68-xs4j5"] Feb 16 11:25:57 crc kubenswrapper[4949]: I0216 11:25:57.856643 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-55dffc8d68-xs4j5" event={"ID":"af6b2570-6c23-4589-b650-e3a7db2cb482","Type":"ContainerStarted","Data":"606863e3666e8ef1425f9ccfa1a4467fafcfa68d9659c81ff55fff73b4066464"} Feb 16 11:26:01 crc kubenswrapper[4949]: I0216 11:26:01.909310 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-55dffc8d68-xs4j5" event={"ID":"af6b2570-6c23-4589-b650-e3a7db2cb482","Type":"ContainerStarted","Data":"d3e35a0a9f2c5fd16aa5ef66974692cd0a0ad3a8d0227ca7aa664425c498a162"} Feb 16 11:26:01 crc kubenswrapper[4949]: I0216 11:26:01.910141 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-init-55dffc8d68-xs4j5" Feb 16 11:26:01 crc kubenswrapper[4949]: I0216 11:26:01.950085 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-init-55dffc8d68-xs4j5" podStartSLOduration=1.417315524 podStartE2EDuration="5.950064424s" podCreationTimestamp="2026-02-16 11:25:56 +0000 UTC" firstStartedPulling="2026-02-16 11:25:57.161943238 +0000 UTC m=+1146.791277403" lastFinishedPulling="2026-02-16 11:26:01.694692138 +0000 UTC m=+1151.324026303" observedRunningTime="2026-02-16 11:26:01.944449964 +0000 UTC m=+1151.573784139" watchObservedRunningTime="2026-02-16 11:26:01.950064424 +0000 UTC m=+1151.579398599" Feb 16 11:26:04 crc kubenswrapper[4949]: I0216 11:26:04.550645 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 16 11:26:04 crc kubenswrapper[4949]: I0216 11:26:04.551212 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 16 11:26:06 crc kubenswrapper[4949]: I0216 11:26:06.610193 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-init-55dffc8d68-xs4j5" Feb 16 11:26:34 crc kubenswrapper[4949]: I0216 11:26:34.550607 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 16 11:26:34 crc kubenswrapper[4949]: I0216 11:26:34.551551 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: 
connection refused" Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.458111 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-868647ff47-blpwl"] Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.461066 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-868647ff47-blpwl" Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.465543 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-jr8gp" Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.474637 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-868647ff47-blpwl"] Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.497247 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-5d946d989d-7ndss"] Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.498710 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-5d946d989d-7ndss" Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.502879 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-b46bk" Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.554961 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-5d946d989d-7ndss"] Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.573988 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-6d8bf5c495-zm28n"] Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.576726 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-6d8bf5c495-zm28n" Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.607893 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-bf2r5" Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.627633 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jtztn\" (UniqueName: \"kubernetes.io/projected/3608d840-be2f-478e-8252-e41f5480853a-kube-api-access-jtztn\") pod \"barbican-operator-controller-manager-868647ff47-blpwl\" (UID: \"3608d840-be2f-478e-8252-e41f5480853a\") " pod="openstack-operators/barbican-operator-controller-manager-868647ff47-blpwl" Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.628461 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4l6k2\" (UniqueName: \"kubernetes.io/projected/14ac83e9-3142-4e62-b2a5-789822ea3013-kube-api-access-4l6k2\") pod \"cinder-operator-controller-manager-5d946d989d-7ndss\" (UID: \"14ac83e9-3142-4e62-b2a5-789822ea3013\") " pod="openstack-operators/cinder-operator-controller-manager-5d946d989d-7ndss" Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.696938 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-6d8bf5c495-zm28n"] Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.737114 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bbfr2\" (UniqueName: \"kubernetes.io/projected/f0769aea-7db8-4dcb-bbde-8a4b918b3fa7-kube-api-access-bbfr2\") pod \"designate-operator-controller-manager-6d8bf5c495-zm28n\" (UID: \"f0769aea-7db8-4dcb-bbde-8a4b918b3fa7\") " pod="openstack-operators/designate-operator-controller-manager-6d8bf5c495-zm28n" Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.737206 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jtztn\" (UniqueName: \"kubernetes.io/projected/3608d840-be2f-478e-8252-e41f5480853a-kube-api-access-jtztn\") pod \"barbican-operator-controller-manager-868647ff47-blpwl\" (UID: \"3608d840-be2f-478e-8252-e41f5480853a\") " pod="openstack-operators/barbican-operator-controller-manager-868647ff47-blpwl" Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.737238 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4l6k2\" (UniqueName: \"kubernetes.io/projected/14ac83e9-3142-4e62-b2a5-789822ea3013-kube-api-access-4l6k2\") pod \"cinder-operator-controller-manager-5d946d989d-7ndss\" (UID: \"14ac83e9-3142-4e62-b2a5-789822ea3013\") " pod="openstack-operators/cinder-operator-controller-manager-5d946d989d-7ndss" Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.762279 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-77987464f4-w59wt"] Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.763813 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-77987464f4-w59wt" Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.773384 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-t857l" Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.785315 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4l6k2\" (UniqueName: \"kubernetes.io/projected/14ac83e9-3142-4e62-b2a5-789822ea3013-kube-api-access-4l6k2\") pod \"cinder-operator-controller-manager-5d946d989d-7ndss\" (UID: \"14ac83e9-3142-4e62-b2a5-789822ea3013\") " pod="openstack-operators/cinder-operator-controller-manager-5d946d989d-7ndss" Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.795928 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jtztn\" (UniqueName: \"kubernetes.io/projected/3608d840-be2f-478e-8252-e41f5480853a-kube-api-access-jtztn\") pod \"barbican-operator-controller-manager-868647ff47-blpwl\" (UID: \"3608d840-be2f-478e-8252-e41f5480853a\") " pod="openstack-operators/barbican-operator-controller-manager-868647ff47-blpwl" Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.806558 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-69f49c598c-r82d9"] Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.808335 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-69f49c598c-r82d9" Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.811270 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-sxqhr" Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.820060 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-77987464f4-w59wt"] Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.825072 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-69f49c598c-r82d9"] Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.836550 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5b9b8895d5-j7vgc"] Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.838348 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-5b9b8895d5-j7vgc" Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.839656 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bbfr2\" (UniqueName: \"kubernetes.io/projected/f0769aea-7db8-4dcb-bbde-8a4b918b3fa7-kube-api-access-bbfr2\") pod \"designate-operator-controller-manager-6d8bf5c495-zm28n\" (UID: \"f0769aea-7db8-4dcb-bbde-8a4b918b3fa7\") " pod="openstack-operators/designate-operator-controller-manager-6d8bf5c495-zm28n" Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.843701 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-hhvwx" Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.844185 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-79d975b745-6ms2x"] Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.845662 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-79d975b745-6ms2x" Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.847958 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-5d946d989d-7ndss" Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.853784 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-pjgjq" Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.854292 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.854426 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5b9b8895d5-j7vgc"] Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.893420 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bbfr2\" (UniqueName: \"kubernetes.io/projected/f0769aea-7db8-4dcb-bbde-8a4b918b3fa7-kube-api-access-bbfr2\") pod \"designate-operator-controller-manager-6d8bf5c495-zm28n\" (UID: \"f0769aea-7db8-4dcb-bbde-8a4b918b3fa7\") " pod="openstack-operators/designate-operator-controller-manager-6d8bf5c495-zm28n" Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.894377 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-79d975b745-6ms2x"] Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.920602 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-554564d7fc-d928p"] Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.922021 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-b4d948c87-57rt6"] Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.923032 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-b4d948c87-57rt6" Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.923595 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-554564d7fc-d928p" Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.936384 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-slm8k" Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.936573 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-mnkst" Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.936669 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-554564d7fc-d928p"] Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.937097 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-6d8bf5c495-zm28n" Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.938125 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-b4d948c87-57rt6"] Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.944506 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-54f6768c69-lzkxk"] Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.946451 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-54f6768c69-lzkxk" Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.948389 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4274f89e-0708-44ec-9bbb-3bb865c71d82-cert\") pod \"infra-operator-controller-manager-79d975b745-6ms2x\" (UID: \"4274f89e-0708-44ec-9bbb-3bb865c71d82\") " pod="openstack-operators/infra-operator-controller-manager-79d975b745-6ms2x" Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.948431 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wmltm\" (UniqueName: \"kubernetes.io/projected/4274f89e-0708-44ec-9bbb-3bb865c71d82-kube-api-access-wmltm\") pod \"infra-operator-controller-manager-79d975b745-6ms2x\" (UID: \"4274f89e-0708-44ec-9bbb-3bb865c71d82\") " pod="openstack-operators/infra-operator-controller-manager-79d975b745-6ms2x" Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.948476 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b7nn4\" (UniqueName: \"kubernetes.io/projected/482d88cb-1680-4276-8373-bf565231eadf-kube-api-access-b7nn4\") pod \"glance-operator-controller-manager-77987464f4-w59wt\" (UID: \"482d88cb-1680-4276-8373-bf565231eadf\") " pod="openstack-operators/glance-operator-controller-manager-77987464f4-w59wt" Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.948526 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ddkt6\" (UniqueName: \"kubernetes.io/projected/7927070f-ccfa-49c3-9a88-6fdee387c97c-kube-api-access-ddkt6\") pod \"heat-operator-controller-manager-69f49c598c-r82d9\" (UID: \"7927070f-ccfa-49c3-9a88-6fdee387c97c\") " pod="openstack-operators/heat-operator-controller-manager-69f49c598c-r82d9" Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.948601 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"kube-api-access-zxhw2\" (UniqueName: \"kubernetes.io/projected/6b381c9a-6963-419d-b96c-81ca6ea674d3-kube-api-access-zxhw2\") pod \"horizon-operator-controller-manager-5b9b8895d5-j7vgc\" (UID: \"6b381c9a-6963-419d-b96c-81ca6ea674d3\") " pod="openstack-operators/horizon-operator-controller-manager-5b9b8895d5-j7vgc" Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.954831 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-jdkcf" Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.974650 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-6994f66f48-jv8z8"] Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.975962 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-6994f66f48-jv8z8" Feb 16 11:26:42 crc kubenswrapper[4949]: I0216 11:26:42.995293 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-ndc26" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:42.999213 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-64ddbf8bb-wsh6f"] Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.000533 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-64ddbf8bb-wsh6f" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.010357 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-97pzp" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.029934 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-54f6768c69-lzkxk"] Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.050455 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-txnv5\" (UniqueName: \"kubernetes.io/projected/feaf7854-ba7b-4246-827d-941656a7f10b-kube-api-access-txnv5\") pod \"keystone-operator-controller-manager-b4d948c87-57rt6\" (UID: \"feaf7854-ba7b-4246-827d-941656a7f10b\") " pod="openstack-operators/keystone-operator-controller-manager-b4d948c87-57rt6" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.050563 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zxhw2\" (UniqueName: \"kubernetes.io/projected/6b381c9a-6963-419d-b96c-81ca6ea674d3-kube-api-access-zxhw2\") pod \"horizon-operator-controller-manager-5b9b8895d5-j7vgc\" (UID: \"6b381c9a-6963-419d-b96c-81ca6ea674d3\") " pod="openstack-operators/horizon-operator-controller-manager-5b9b8895d5-j7vgc" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.050616 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4274f89e-0708-44ec-9bbb-3bb865c71d82-cert\") pod \"infra-operator-controller-manager-79d975b745-6ms2x\" (UID: \"4274f89e-0708-44ec-9bbb-3bb865c71d82\") " pod="openstack-operators/infra-operator-controller-manager-79d975b745-6ms2x" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.050655 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vskqh\" (UniqueName: 
\"kubernetes.io/projected/6c160a96-7508-45af-82b7-37ca399bb5af-kube-api-access-vskqh\") pod \"manila-operator-controller-manager-54f6768c69-lzkxk\" (UID: \"6c160a96-7508-45af-82b7-37ca399bb5af\") " pod="openstack-operators/manila-operator-controller-manager-54f6768c69-lzkxk" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.050690 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wmltm\" (UniqueName: \"kubernetes.io/projected/4274f89e-0708-44ec-9bbb-3bb865c71d82-kube-api-access-wmltm\") pod \"infra-operator-controller-manager-79d975b745-6ms2x\" (UID: \"4274f89e-0708-44ec-9bbb-3bb865c71d82\") " pod="openstack-operators/infra-operator-controller-manager-79d975b745-6ms2x" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.050727 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b7nn4\" (UniqueName: \"kubernetes.io/projected/482d88cb-1680-4276-8373-bf565231eadf-kube-api-access-b7nn4\") pod \"glance-operator-controller-manager-77987464f4-w59wt\" (UID: \"482d88cb-1680-4276-8373-bf565231eadf\") " pod="openstack-operators/glance-operator-controller-manager-77987464f4-w59wt" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.050756 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lwx85\" (UniqueName: \"kubernetes.io/projected/b81e3e22-4860-4765-9683-675c1fbbefef-kube-api-access-lwx85\") pod \"ironic-operator-controller-manager-554564d7fc-d928p\" (UID: \"b81e3e22-4860-4765-9683-675c1fbbefef\") " pod="openstack-operators/ironic-operator-controller-manager-554564d7fc-d928p" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.050804 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ddkt6\" (UniqueName: \"kubernetes.io/projected/7927070f-ccfa-49c3-9a88-6fdee387c97c-kube-api-access-ddkt6\") pod \"heat-operator-controller-manager-69f49c598c-r82d9\" (UID: \"7927070f-ccfa-49c3-9a88-6fdee387c97c\") " pod="openstack-operators/heat-operator-controller-manager-69f49c598c-r82d9" Feb 16 11:26:43 crc kubenswrapper[4949]: E0216 11:26:43.051462 4949 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Feb 16 11:26:43 crc kubenswrapper[4949]: E0216 11:26:43.051597 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4274f89e-0708-44ec-9bbb-3bb865c71d82-cert podName:4274f89e-0708-44ec-9bbb-3bb865c71d82 nodeName:}" failed. No retries permitted until 2026-02-16 11:26:43.551562014 +0000 UTC m=+1193.180896369 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/4274f89e-0708-44ec-9bbb-3bb865c71d82-cert") pod "infra-operator-controller-manager-79d975b745-6ms2x" (UID: "4274f89e-0708-44ec-9bbb-3bb865c71d82") : secret "infra-operator-webhook-server-cert" not found Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.077466 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-567668f5cf-cx2cw"] Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.079108 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-567668f5cf-cx2cw" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.085683 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-9jhc7" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.088931 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-6994f66f48-jv8z8"] Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.097291 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-868647ff47-blpwl" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.109301 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b7nn4\" (UniqueName: \"kubernetes.io/projected/482d88cb-1680-4276-8373-bf565231eadf-kube-api-access-b7nn4\") pod \"glance-operator-controller-manager-77987464f4-w59wt\" (UID: \"482d88cb-1680-4276-8373-bf565231eadf\") " pod="openstack-operators/glance-operator-controller-manager-77987464f4-w59wt" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.109745 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ddkt6\" (UniqueName: \"kubernetes.io/projected/7927070f-ccfa-49c3-9a88-6fdee387c97c-kube-api-access-ddkt6\") pod \"heat-operator-controller-manager-69f49c598c-r82d9\" (UID: \"7927070f-ccfa-49c3-9a88-6fdee387c97c\") " pod="openstack-operators/heat-operator-controller-manager-69f49c598c-r82d9" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.110284 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wmltm\" (UniqueName: \"kubernetes.io/projected/4274f89e-0708-44ec-9bbb-3bb865c71d82-kube-api-access-wmltm\") pod \"infra-operator-controller-manager-79d975b745-6ms2x\" (UID: \"4274f89e-0708-44ec-9bbb-3bb865c71d82\") " pod="openstack-operators/infra-operator-controller-manager-79d975b745-6ms2x" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.113658 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-567668f5cf-cx2cw"] Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.119026 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zxhw2\" (UniqueName: \"kubernetes.io/projected/6b381c9a-6963-419d-b96c-81ca6ea674d3-kube-api-access-zxhw2\") pod \"horizon-operator-controller-manager-5b9b8895d5-j7vgc\" (UID: \"6b381c9a-6963-419d-b96c-81ca6ea674d3\") " pod="openstack-operators/horizon-operator-controller-manager-5b9b8895d5-j7vgc" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.125720 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-69f8888797-4bjjf"] Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.128026 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-69f8888797-4bjjf" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.135781 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-rh4wg" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.142781 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-64ddbf8bb-wsh6f"] Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.152768 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vskqh\" (UniqueName: \"kubernetes.io/projected/6c160a96-7508-45af-82b7-37ca399bb5af-kube-api-access-vskqh\") pod \"manila-operator-controller-manager-54f6768c69-lzkxk\" (UID: \"6c160a96-7508-45af-82b7-37ca399bb5af\") " pod="openstack-operators/manila-operator-controller-manager-54f6768c69-lzkxk" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.152859 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lwx85\" (UniqueName: \"kubernetes.io/projected/b81e3e22-4860-4765-9683-675c1fbbefef-kube-api-access-lwx85\") pod \"ironic-operator-controller-manager-554564d7fc-d928p\" (UID: \"b81e3e22-4860-4765-9683-675c1fbbefef\") " pod="openstack-operators/ironic-operator-controller-manager-554564d7fc-d928p" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.152926 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-22mpv\" (UniqueName: \"kubernetes.io/projected/ad558c16-93d3-4e12-8feb-56516d331bbe-kube-api-access-22mpv\") pod \"mariadb-operator-controller-manager-6994f66f48-jv8z8\" (UID: \"ad558c16-93d3-4e12-8feb-56516d331bbe\") " pod="openstack-operators/mariadb-operator-controller-manager-6994f66f48-jv8z8" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.152996 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fhddn\" (UniqueName: \"kubernetes.io/projected/70f95803-8857-4a40-b133-c12031e17f77-kube-api-access-fhddn\") pod \"neutron-operator-controller-manager-64ddbf8bb-wsh6f\" (UID: \"70f95803-8857-4a40-b133-c12031e17f77\") " pod="openstack-operators/neutron-operator-controller-manager-64ddbf8bb-wsh6f" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.153047 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-txnv5\" (UniqueName: \"kubernetes.io/projected/feaf7854-ba7b-4246-827d-941656a7f10b-kube-api-access-txnv5\") pod \"keystone-operator-controller-manager-b4d948c87-57rt6\" (UID: \"feaf7854-ba7b-4246-827d-941656a7f10b\") " pod="openstack-operators/keystone-operator-controller-manager-b4d948c87-57rt6" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.173200 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9clpgjd"] Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.177811 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9clpgjd" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.181870 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.182002 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-zms9v" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.215009 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lwx85\" (UniqueName: \"kubernetes.io/projected/b81e3e22-4860-4765-9683-675c1fbbefef-kube-api-access-lwx85\") pod \"ironic-operator-controller-manager-554564d7fc-d928p\" (UID: \"b81e3e22-4860-4765-9683-675c1fbbefef\") " pod="openstack-operators/ironic-operator-controller-manager-554564d7fc-d928p" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.215113 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-txnv5\" (UniqueName: \"kubernetes.io/projected/feaf7854-ba7b-4246-827d-941656a7f10b-kube-api-access-txnv5\") pod \"keystone-operator-controller-manager-b4d948c87-57rt6\" (UID: \"feaf7854-ba7b-4246-827d-941656a7f10b\") " pod="openstack-operators/keystone-operator-controller-manager-b4d948c87-57rt6" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.242582 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vskqh\" (UniqueName: \"kubernetes.io/projected/6c160a96-7508-45af-82b7-37ca399bb5af-kube-api-access-vskqh\") pod \"manila-operator-controller-manager-54f6768c69-lzkxk\" (UID: \"6c160a96-7508-45af-82b7-37ca399bb5af\") " pod="openstack-operators/manila-operator-controller-manager-54f6768c69-lzkxk" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.256951 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-phs77\" (UniqueName: \"kubernetes.io/projected/d1e2149a-cad2-4e22-822c-d5afb8294a25-kube-api-access-phs77\") pod \"octavia-operator-controller-manager-69f8888797-4bjjf\" (UID: \"d1e2149a-cad2-4e22-822c-d5afb8294a25\") " pod="openstack-operators/octavia-operator-controller-manager-69f8888797-4bjjf" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.257018 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xxd2f\" (UniqueName: \"kubernetes.io/projected/20c60056-65e4-486e-8b5e-bf7aef44b9bc-kube-api-access-xxd2f\") pod \"nova-operator-controller-manager-567668f5cf-cx2cw\" (UID: \"20c60056-65e4-486e-8b5e-bf7aef44b9bc\") " pod="openstack-operators/nova-operator-controller-manager-567668f5cf-cx2cw" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.257080 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-22mpv\" (UniqueName: \"kubernetes.io/projected/ad558c16-93d3-4e12-8feb-56516d331bbe-kube-api-access-22mpv\") pod \"mariadb-operator-controller-manager-6994f66f48-jv8z8\" (UID: \"ad558c16-93d3-4e12-8feb-56516d331bbe\") " pod="openstack-operators/mariadb-operator-controller-manager-6994f66f48-jv8z8" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.257140 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fhddn\" (UniqueName: 
\"kubernetes.io/projected/70f95803-8857-4a40-b133-c12031e17f77-kube-api-access-fhddn\") pod \"neutron-operator-controller-manager-64ddbf8bb-wsh6f\" (UID: \"70f95803-8857-4a40-b133-c12031e17f77\") " pod="openstack-operators/neutron-operator-controller-manager-64ddbf8bb-wsh6f" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.261393 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-77987464f4-w59wt" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.276134 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-69f8888797-4bjjf"] Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.296822 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-69f49c598c-r82d9" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.296836 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-22mpv\" (UniqueName: \"kubernetes.io/projected/ad558c16-93d3-4e12-8feb-56516d331bbe-kube-api-access-22mpv\") pod \"mariadb-operator-controller-manager-6994f66f48-jv8z8\" (UID: \"ad558c16-93d3-4e12-8feb-56516d331bbe\") " pod="openstack-operators/mariadb-operator-controller-manager-6994f66f48-jv8z8" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.306634 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fhddn\" (UniqueName: \"kubernetes.io/projected/70f95803-8857-4a40-b133-c12031e17f77-kube-api-access-fhddn\") pod \"neutron-operator-controller-manager-64ddbf8bb-wsh6f\" (UID: \"70f95803-8857-4a40-b133-c12031e17f77\") " pod="openstack-operators/neutron-operator-controller-manager-64ddbf8bb-wsh6f" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.309610 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-d44cf6b75-vsknd"] Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.310821 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-d44cf6b75-vsknd" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.311113 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-5b9b8895d5-j7vgc" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.313549 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-v8ggc" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.335190 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-64ddbf8bb-wsh6f" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.359408 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vp9gz\" (UniqueName: \"kubernetes.io/projected/f583e5c3-96ec-417a-8d47-541896c301fb-kube-api-access-vp9gz\") pod \"openstack-baremetal-operator-controller-manager-7c6767dc9clpgjd\" (UID: \"f583e5c3-96ec-417a-8d47-541896c301fb\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9clpgjd" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.359482 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-phs77\" (UniqueName: \"kubernetes.io/projected/d1e2149a-cad2-4e22-822c-d5afb8294a25-kube-api-access-phs77\") pod \"octavia-operator-controller-manager-69f8888797-4bjjf\" (UID: \"d1e2149a-cad2-4e22-822c-d5afb8294a25\") " pod="openstack-operators/octavia-operator-controller-manager-69f8888797-4bjjf" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.359530 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xxd2f\" (UniqueName: \"kubernetes.io/projected/20c60056-65e4-486e-8b5e-bf7aef44b9bc-kube-api-access-xxd2f\") pod \"nova-operator-controller-manager-567668f5cf-cx2cw\" (UID: \"20c60056-65e4-486e-8b5e-bf7aef44b9bc\") " pod="openstack-operators/nova-operator-controller-manager-567668f5cf-cx2cw" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.359609 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/f583e5c3-96ec-417a-8d47-541896c301fb-cert\") pod \"openstack-baremetal-operator-controller-manager-7c6767dc9clpgjd\" (UID: \"f583e5c3-96ec-417a-8d47-541896c301fb\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9clpgjd" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.384227 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9clpgjd"] Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.393895 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-b4d948c87-57rt6" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.420216 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-phs77\" (UniqueName: \"kubernetes.io/projected/d1e2149a-cad2-4e22-822c-d5afb8294a25-kube-api-access-phs77\") pod \"octavia-operator-controller-manager-69f8888797-4bjjf\" (UID: \"d1e2149a-cad2-4e22-822c-d5afb8294a25\") " pod="openstack-operators/octavia-operator-controller-manager-69f8888797-4bjjf" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.421376 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-68f46476f-dk4s8"] Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.423992 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-68f46476f-dk4s8" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.442711 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-l2wx6" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.444446 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-554564d7fc-d928p" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.461313 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mh6vs\" (UniqueName: \"kubernetes.io/projected/c179d330-538e-4fc6-afeb-bc3bfdf92569-kube-api-access-mh6vs\") pod \"ovn-operator-controller-manager-d44cf6b75-vsknd\" (UID: \"c179d330-538e-4fc6-afeb-bc3bfdf92569\") " pod="openstack-operators/ovn-operator-controller-manager-d44cf6b75-vsknd" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.463968 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-d44cf6b75-vsknd"] Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.468374 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xxd2f\" (UniqueName: \"kubernetes.io/projected/20c60056-65e4-486e-8b5e-bf7aef44b9bc-kube-api-access-xxd2f\") pod \"nova-operator-controller-manager-567668f5cf-cx2cw\" (UID: \"20c60056-65e4-486e-8b5e-bf7aef44b9bc\") " pod="openstack-operators/nova-operator-controller-manager-567668f5cf-cx2cw" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.472328 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vp9gz\" (UniqueName: \"kubernetes.io/projected/f583e5c3-96ec-417a-8d47-541896c301fb-kube-api-access-vp9gz\") pod \"openstack-baremetal-operator-controller-manager-7c6767dc9clpgjd\" (UID: \"f583e5c3-96ec-417a-8d47-541896c301fb\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9clpgjd" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.472732 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/f583e5c3-96ec-417a-8d47-541896c301fb-cert\") pod \"openstack-baremetal-operator-controller-manager-7c6767dc9clpgjd\" (UID: \"f583e5c3-96ec-417a-8d47-541896c301fb\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9clpgjd" Feb 16 11:26:43 crc kubenswrapper[4949]: E0216 11:26:43.473611 4949 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Feb 16 11:26:43 crc kubenswrapper[4949]: E0216 11:26:43.473798 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f583e5c3-96ec-417a-8d47-541896c301fb-cert podName:f583e5c3-96ec-417a-8d47-541896c301fb nodeName:}" failed. No retries permitted until 2026-02-16 11:26:43.973774262 +0000 UTC m=+1193.603108427 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/f583e5c3-96ec-417a-8d47-541896c301fb-cert") pod "openstack-baremetal-operator-controller-manager-7c6767dc9clpgjd" (UID: "f583e5c3-96ec-417a-8d47-541896c301fb") : secret "openstack-baremetal-operator-webhook-server-cert" not found Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.537553 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-54f6768c69-lzkxk" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.561372 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-68f46476f-dk4s8"] Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.566218 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vp9gz\" (UniqueName: \"kubernetes.io/projected/f583e5c3-96ec-417a-8d47-541896c301fb-kube-api-access-vp9gz\") pod \"openstack-baremetal-operator-controller-manager-7c6767dc9clpgjd\" (UID: \"f583e5c3-96ec-417a-8d47-541896c301fb\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9clpgjd" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.586229 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4274f89e-0708-44ec-9bbb-3bb865c71d82-cert\") pod \"infra-operator-controller-manager-79d975b745-6ms2x\" (UID: \"4274f89e-0708-44ec-9bbb-3bb865c71d82\") " pod="openstack-operators/infra-operator-controller-manager-79d975b745-6ms2x" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.586401 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xs4jm\" (UniqueName: \"kubernetes.io/projected/f6c8332e-9a8f-44dc-ac2b-8180c68d8f0f-kube-api-access-xs4jm\") pod \"swift-operator-controller-manager-68f46476f-dk4s8\" (UID: \"f6c8332e-9a8f-44dc-ac2b-8180c68d8f0f\") " pod="openstack-operators/swift-operator-controller-manager-68f46476f-dk4s8" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.586449 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mh6vs\" (UniqueName: \"kubernetes.io/projected/c179d330-538e-4fc6-afeb-bc3bfdf92569-kube-api-access-mh6vs\") pod \"ovn-operator-controller-manager-d44cf6b75-vsknd\" (UID: \"c179d330-538e-4fc6-afeb-bc3bfdf92569\") " pod="openstack-operators/ovn-operator-controller-manager-d44cf6b75-vsknd" Feb 16 11:26:43 crc kubenswrapper[4949]: E0216 11:26:43.586972 4949 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Feb 16 11:26:43 crc kubenswrapper[4949]: E0216 11:26:43.587029 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4274f89e-0708-44ec-9bbb-3bb865c71d82-cert podName:4274f89e-0708-44ec-9bbb-3bb865c71d82 nodeName:}" failed. No retries permitted until 2026-02-16 11:26:44.587013505 +0000 UTC m=+1194.216347670 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/4274f89e-0708-44ec-9bbb-3bb865c71d82-cert") pod "infra-operator-controller-manager-79d975b745-6ms2x" (UID: "4274f89e-0708-44ec-9bbb-3bb865c71d82") : secret "infra-operator-webhook-server-cert" not found Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.587964 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-6994f66f48-jv8z8" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.638519 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-567668f5cf-cx2cw" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.664834 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mh6vs\" (UniqueName: \"kubernetes.io/projected/c179d330-538e-4fc6-afeb-bc3bfdf92569-kube-api-access-mh6vs\") pod \"ovn-operator-controller-manager-d44cf6b75-vsknd\" (UID: \"c179d330-538e-4fc6-afeb-bc3bfdf92569\") " pod="openstack-operators/ovn-operator-controller-manager-d44cf6b75-vsknd" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.718508 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xs4jm\" (UniqueName: \"kubernetes.io/projected/f6c8332e-9a8f-44dc-ac2b-8180c68d8f0f-kube-api-access-xs4jm\") pod \"swift-operator-controller-manager-68f46476f-dk4s8\" (UID: \"f6c8332e-9a8f-44dc-ac2b-8180c68d8f0f\") " pod="openstack-operators/swift-operator-controller-manager-68f46476f-dk4s8" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.723982 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-69f8888797-4bjjf" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.752669 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-d44cf6b75-vsknd" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.770738 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xs4jm\" (UniqueName: \"kubernetes.io/projected/f6c8332e-9a8f-44dc-ac2b-8180c68d8f0f-kube-api-access-xs4jm\") pod \"swift-operator-controller-manager-68f46476f-dk4s8\" (UID: \"f6c8332e-9a8f-44dc-ac2b-8180c68d8f0f\") " pod="openstack-operators/swift-operator-controller-manager-68f46476f-dk4s8" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.836041 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-8497b45c89-dsfbf"] Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.842388 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-8497b45c89-dsfbf" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.852474 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-k9wgf" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.852740 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-64b85768bb-85t96"] Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.854668 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-64b85768bb-85t96" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.869242 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-68f46476f-dk4s8" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.878205 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-dhbhv" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.900090 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-8497b45c89-dsfbf"] Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.922206 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5nrpf\" (UniqueName: \"kubernetes.io/projected/6f3a40af-b9a5-4709-8173-fb62a0d053e8-kube-api-access-5nrpf\") pod \"placement-operator-controller-manager-8497b45c89-dsfbf\" (UID: \"6f3a40af-b9a5-4709-8173-fb62a0d053e8\") " pod="openstack-operators/placement-operator-controller-manager-8497b45c89-dsfbf" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.922398 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-md84w\" (UniqueName: \"kubernetes.io/projected/6c8f8a0e-a378-4b98-8c9b-e5180c97e088-kube-api-access-md84w\") pod \"telemetry-operator-controller-manager-64b85768bb-85t96\" (UID: \"6c8f8a0e-a378-4b98-8c9b-e5180c97e088\") " pod="openstack-operators/telemetry-operator-controller-manager-64b85768bb-85t96" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.939462 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-64b85768bb-85t96"] Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.947468 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-7866795846-f4l9p"] Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.948760 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-7866795846-f4l9p" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.951715 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-jjpdp" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.970910 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-7866795846-f4l9p"] Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.982345 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-5db88f68c-kwgzs"] Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.983975 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-5db88f68c-kwgzs" Feb 16 11:26:43 crc kubenswrapper[4949]: I0216 11:26:43.988700 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-sq6dx" Feb 16 11:26:44 crc kubenswrapper[4949]: I0216 11:26:44.016491 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-5db88f68c-kwgzs"] Feb 16 11:26:44 crc kubenswrapper[4949]: I0216 11:26:44.025210 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-md84w\" (UniqueName: \"kubernetes.io/projected/6c8f8a0e-a378-4b98-8c9b-e5180c97e088-kube-api-access-md84w\") pod \"telemetry-operator-controller-manager-64b85768bb-85t96\" (UID: \"6c8f8a0e-a378-4b98-8c9b-e5180c97e088\") " pod="openstack-operators/telemetry-operator-controller-manager-64b85768bb-85t96" Feb 16 11:26:44 crc kubenswrapper[4949]: I0216 11:26:44.025600 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5nrpf\" (UniqueName: \"kubernetes.io/projected/6f3a40af-b9a5-4709-8173-fb62a0d053e8-kube-api-access-5nrpf\") pod \"placement-operator-controller-manager-8497b45c89-dsfbf\" (UID: \"6f3a40af-b9a5-4709-8173-fb62a0d053e8\") " pod="openstack-operators/placement-operator-controller-manager-8497b45c89-dsfbf" Feb 16 11:26:44 crc kubenswrapper[4949]: I0216 11:26:44.025638 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7l8hm\" (UniqueName: \"kubernetes.io/projected/10670483-76f3-4774-9aa4-a0c21ff5799e-kube-api-access-7l8hm\") pod \"test-operator-controller-manager-7866795846-f4l9p\" (UID: \"10670483-76f3-4774-9aa4-a0c21ff5799e\") " pod="openstack-operators/test-operator-controller-manager-7866795846-f4l9p" Feb 16 11:26:44 crc kubenswrapper[4949]: I0216 11:26:44.025758 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/f583e5c3-96ec-417a-8d47-541896c301fb-cert\") pod \"openstack-baremetal-operator-controller-manager-7c6767dc9clpgjd\" (UID: \"f583e5c3-96ec-417a-8d47-541896c301fb\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9clpgjd" Feb 16 11:26:44 crc kubenswrapper[4949]: E0216 11:26:44.025961 4949 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Feb 16 11:26:44 crc kubenswrapper[4949]: E0216 11:26:44.026118 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f583e5c3-96ec-417a-8d47-541896c301fb-cert podName:f583e5c3-96ec-417a-8d47-541896c301fb nodeName:}" failed. No retries permitted until 2026-02-16 11:26:45.026006251 +0000 UTC m=+1194.655340416 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/f583e5c3-96ec-417a-8d47-541896c301fb-cert") pod "openstack-baremetal-operator-controller-manager-7c6767dc9clpgjd" (UID: "f583e5c3-96ec-417a-8d47-541896c301fb") : secret "openstack-baremetal-operator-webhook-server-cert" not found Feb 16 11:26:44 crc kubenswrapper[4949]: I0216 11:26:44.069612 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-md84w\" (UniqueName: \"kubernetes.io/projected/6c8f8a0e-a378-4b98-8c9b-e5180c97e088-kube-api-access-md84w\") pod \"telemetry-operator-controller-manager-64b85768bb-85t96\" (UID: \"6c8f8a0e-a378-4b98-8c9b-e5180c97e088\") " pod="openstack-operators/telemetry-operator-controller-manager-64b85768bb-85t96" Feb 16 11:26:44 crc kubenswrapper[4949]: I0216 11:26:44.084806 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5nrpf\" (UniqueName: \"kubernetes.io/projected/6f3a40af-b9a5-4709-8173-fb62a0d053e8-kube-api-access-5nrpf\") pod \"placement-operator-controller-manager-8497b45c89-dsfbf\" (UID: \"6f3a40af-b9a5-4709-8173-fb62a0d053e8\") " pod="openstack-operators/placement-operator-controller-manager-8497b45c89-dsfbf" Feb 16 11:26:44 crc kubenswrapper[4949]: I0216 11:26:44.093926 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-6b65fbbb9f-7j44g"] Feb 16 11:26:44 crc kubenswrapper[4949]: I0216 11:26:44.097510 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-6b65fbbb9f-7j44g" Feb 16 11:26:44 crc kubenswrapper[4949]: I0216 11:26:44.097579 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-8497b45c89-dsfbf" Feb 16 11:26:44 crc kubenswrapper[4949]: I0216 11:26:44.101419 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Feb 16 11:26:44 crc kubenswrapper[4949]: I0216 11:26:44.103146 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Feb 16 11:26:44 crc kubenswrapper[4949]: I0216 11:26:44.103641 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-fvhp2" Feb 16 11:26:44 crc kubenswrapper[4949]: I0216 11:26:44.113856 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-6b65fbbb9f-7j44g"] Feb 16 11:26:44 crc kubenswrapper[4949]: I0216 11:26:44.129406 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7l8hm\" (UniqueName: \"kubernetes.io/projected/10670483-76f3-4774-9aa4-a0c21ff5799e-kube-api-access-7l8hm\") pod \"test-operator-controller-manager-7866795846-f4l9p\" (UID: \"10670483-76f3-4774-9aa4-a0c21ff5799e\") " pod="openstack-operators/test-operator-controller-manager-7866795846-f4l9p" Feb 16 11:26:44 crc kubenswrapper[4949]: I0216 11:26:44.129634 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6vx6w\" (UniqueName: \"kubernetes.io/projected/06428bd6-f2fd-44ab-b71a-fe48ec54189d-kube-api-access-6vx6w\") pod \"watcher-operator-controller-manager-5db88f68c-kwgzs\" (UID: \"06428bd6-f2fd-44ab-b71a-fe48ec54189d\") " pod="openstack-operators/watcher-operator-controller-manager-5db88f68c-kwgzs" 
Feb 16 11:26:44 crc kubenswrapper[4949]: I0216 11:26:44.153767 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-p9cl5"] Feb 16 11:26:44 crc kubenswrapper[4949]: I0216 11:26:44.155879 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-p9cl5" Feb 16 11:26:44 crc kubenswrapper[4949]: I0216 11:26:44.162715 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-9qlcx" Feb 16 11:26:44 crc kubenswrapper[4949]: I0216 11:26:44.171800 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7l8hm\" (UniqueName: \"kubernetes.io/projected/10670483-76f3-4774-9aa4-a0c21ff5799e-kube-api-access-7l8hm\") pod \"test-operator-controller-manager-7866795846-f4l9p\" (UID: \"10670483-76f3-4774-9aa4-a0c21ff5799e\") " pod="openstack-operators/test-operator-controller-manager-7866795846-f4l9p" Feb 16 11:26:44 crc kubenswrapper[4949]: I0216 11:26:44.175086 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-64b85768bb-85t96" Feb 16 11:26:44 crc kubenswrapper[4949]: I0216 11:26:44.189648 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-p9cl5"] Feb 16 11:26:44 crc kubenswrapper[4949]: I0216 11:26:44.213102 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-7866795846-f4l9p" Feb 16 11:26:44 crc kubenswrapper[4949]: I0216 11:26:44.249696 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rzr5q\" (UniqueName: \"kubernetes.io/projected/8ac656c0-2570-4fd7-acbb-96182ff1bc95-kube-api-access-rzr5q\") pod \"openstack-operator-controller-manager-6b65fbbb9f-7j44g\" (UID: \"8ac656c0-2570-4fd7-acbb-96182ff1bc95\") " pod="openstack-operators/openstack-operator-controller-manager-6b65fbbb9f-7j44g" Feb 16 11:26:44 crc kubenswrapper[4949]: I0216 11:26:44.249904 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ndgjh\" (UniqueName: \"kubernetes.io/projected/9a87a761-d2b8-4202-98c2-391fdb512cc4-kube-api-access-ndgjh\") pod \"rabbitmq-cluster-operator-manager-668c99d594-p9cl5\" (UID: \"9a87a761-d2b8-4202-98c2-391fdb512cc4\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-p9cl5" Feb 16 11:26:44 crc kubenswrapper[4949]: I0216 11:26:44.250418 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8ac656c0-2570-4fd7-acbb-96182ff1bc95-metrics-certs\") pod \"openstack-operator-controller-manager-6b65fbbb9f-7j44g\" (UID: \"8ac656c0-2570-4fd7-acbb-96182ff1bc95\") " pod="openstack-operators/openstack-operator-controller-manager-6b65fbbb9f-7j44g" Feb 16 11:26:44 crc kubenswrapper[4949]: I0216 11:26:44.250530 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6vx6w\" (UniqueName: \"kubernetes.io/projected/06428bd6-f2fd-44ab-b71a-fe48ec54189d-kube-api-access-6vx6w\") pod \"watcher-operator-controller-manager-5db88f68c-kwgzs\" (UID: \"06428bd6-f2fd-44ab-b71a-fe48ec54189d\") " 
pod="openstack-operators/watcher-operator-controller-manager-5db88f68c-kwgzs" Feb 16 11:26:44 crc kubenswrapper[4949]: I0216 11:26:44.250568 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/8ac656c0-2570-4fd7-acbb-96182ff1bc95-webhook-certs\") pod \"openstack-operator-controller-manager-6b65fbbb9f-7j44g\" (UID: \"8ac656c0-2570-4fd7-acbb-96182ff1bc95\") " pod="openstack-operators/openstack-operator-controller-manager-6b65fbbb9f-7j44g" Feb 16 11:26:44 crc kubenswrapper[4949]: I0216 11:26:44.301243 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6vx6w\" (UniqueName: \"kubernetes.io/projected/06428bd6-f2fd-44ab-b71a-fe48ec54189d-kube-api-access-6vx6w\") pod \"watcher-operator-controller-manager-5db88f68c-kwgzs\" (UID: \"06428bd6-f2fd-44ab-b71a-fe48ec54189d\") " pod="openstack-operators/watcher-operator-controller-manager-5db88f68c-kwgzs" Feb 16 11:26:44 crc kubenswrapper[4949]: I0216 11:26:44.314514 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-6d8bf5c495-zm28n"] Feb 16 11:26:44 crc kubenswrapper[4949]: I0216 11:26:44.347430 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-5d946d989d-7ndss"] Feb 16 11:26:44 crc kubenswrapper[4949]: I0216 11:26:44.358776 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/8ac656c0-2570-4fd7-acbb-96182ff1bc95-webhook-certs\") pod \"openstack-operator-controller-manager-6b65fbbb9f-7j44g\" (UID: \"8ac656c0-2570-4fd7-acbb-96182ff1bc95\") " pod="openstack-operators/openstack-operator-controller-manager-6b65fbbb9f-7j44g" Feb 16 11:26:44 crc kubenswrapper[4949]: I0216 11:26:44.359261 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rzr5q\" (UniqueName: \"kubernetes.io/projected/8ac656c0-2570-4fd7-acbb-96182ff1bc95-kube-api-access-rzr5q\") pod \"openstack-operator-controller-manager-6b65fbbb9f-7j44g\" (UID: \"8ac656c0-2570-4fd7-acbb-96182ff1bc95\") " pod="openstack-operators/openstack-operator-controller-manager-6b65fbbb9f-7j44g" Feb 16 11:26:44 crc kubenswrapper[4949]: I0216 11:26:44.359345 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ndgjh\" (UniqueName: \"kubernetes.io/projected/9a87a761-d2b8-4202-98c2-391fdb512cc4-kube-api-access-ndgjh\") pod \"rabbitmq-cluster-operator-manager-668c99d594-p9cl5\" (UID: \"9a87a761-d2b8-4202-98c2-391fdb512cc4\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-p9cl5" Feb 16 11:26:44 crc kubenswrapper[4949]: I0216 11:26:44.359437 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8ac656c0-2570-4fd7-acbb-96182ff1bc95-metrics-certs\") pod \"openstack-operator-controller-manager-6b65fbbb9f-7j44g\" (UID: \"8ac656c0-2570-4fd7-acbb-96182ff1bc95\") " pod="openstack-operators/openstack-operator-controller-manager-6b65fbbb9f-7j44g" Feb 16 11:26:44 crc kubenswrapper[4949]: E0216 11:26:44.359037 4949 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Feb 16 11:26:44 crc kubenswrapper[4949]: E0216 11:26:44.359660 4949 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/secret/8ac656c0-2570-4fd7-acbb-96182ff1bc95-webhook-certs podName:8ac656c0-2570-4fd7-acbb-96182ff1bc95 nodeName:}" failed. No retries permitted until 2026-02-16 11:26:44.859642177 +0000 UTC m=+1194.488976332 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/8ac656c0-2570-4fd7-acbb-96182ff1bc95-webhook-certs") pod "openstack-operator-controller-manager-6b65fbbb9f-7j44g" (UID: "8ac656c0-2570-4fd7-acbb-96182ff1bc95") : secret "webhook-server-cert" not found Feb 16 11:26:44 crc kubenswrapper[4949]: E0216 11:26:44.359594 4949 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Feb 16 11:26:44 crc kubenswrapper[4949]: E0216 11:26:44.363072 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8ac656c0-2570-4fd7-acbb-96182ff1bc95-metrics-certs podName:8ac656c0-2570-4fd7-acbb-96182ff1bc95 nodeName:}" failed. No retries permitted until 2026-02-16 11:26:44.863031094 +0000 UTC m=+1194.492365269 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/8ac656c0-2570-4fd7-acbb-96182ff1bc95-metrics-certs") pod "openstack-operator-controller-manager-6b65fbbb9f-7j44g" (UID: "8ac656c0-2570-4fd7-acbb-96182ff1bc95") : secret "metrics-server-cert" not found Feb 16 11:26:44 crc kubenswrapper[4949]: I0216 11:26:44.383014 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ndgjh\" (UniqueName: \"kubernetes.io/projected/9a87a761-d2b8-4202-98c2-391fdb512cc4-kube-api-access-ndgjh\") pod \"rabbitmq-cluster-operator-manager-668c99d594-p9cl5\" (UID: \"9a87a761-d2b8-4202-98c2-391fdb512cc4\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-p9cl5" Feb 16 11:26:44 crc kubenswrapper[4949]: I0216 11:26:44.385275 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rzr5q\" (UniqueName: \"kubernetes.io/projected/8ac656c0-2570-4fd7-acbb-96182ff1bc95-kube-api-access-rzr5q\") pod \"openstack-operator-controller-manager-6b65fbbb9f-7j44g\" (UID: \"8ac656c0-2570-4fd7-acbb-96182ff1bc95\") " pod="openstack-operators/openstack-operator-controller-manager-6b65fbbb9f-7j44g" Feb 16 11:26:44 crc kubenswrapper[4949]: I0216 11:26:44.430198 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-5d946d989d-7ndss" event={"ID":"14ac83e9-3142-4e62-b2a5-789822ea3013","Type":"ContainerStarted","Data":"d36fff45d8afff35c62ccf2c7917f59c352a0fb323472bd4c8e8b06353e561bd"} Feb 16 11:26:44 crc kubenswrapper[4949]: I0216 11:26:44.436996 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-868647ff47-blpwl"] Feb 16 11:26:44 crc kubenswrapper[4949]: I0216 11:26:44.439096 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-6d8bf5c495-zm28n" event={"ID":"f0769aea-7db8-4dcb-bbde-8a4b918b3fa7","Type":"ContainerStarted","Data":"0c1a04311f8f6c7e06db3e08f36685b2dad53df8f54f22a733a645589c07edf0"} Feb 16 11:26:44 crc kubenswrapper[4949]: I0216 11:26:44.559426 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-5db88f68c-kwgzs" Feb 16 11:26:44 crc kubenswrapper[4949]: I0216 11:26:44.658462 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-p9cl5" Feb 16 11:26:44 crc kubenswrapper[4949]: I0216 11:26:44.664784 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4274f89e-0708-44ec-9bbb-3bb865c71d82-cert\") pod \"infra-operator-controller-manager-79d975b745-6ms2x\" (UID: \"4274f89e-0708-44ec-9bbb-3bb865c71d82\") " pod="openstack-operators/infra-operator-controller-manager-79d975b745-6ms2x" Feb 16 11:26:44 crc kubenswrapper[4949]: E0216 11:26:44.664996 4949 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Feb 16 11:26:44 crc kubenswrapper[4949]: E0216 11:26:44.665078 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4274f89e-0708-44ec-9bbb-3bb865c71d82-cert podName:4274f89e-0708-44ec-9bbb-3bb865c71d82 nodeName:}" failed. No retries permitted until 2026-02-16 11:26:46.66505598 +0000 UTC m=+1196.294390145 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/4274f89e-0708-44ec-9bbb-3bb865c71d82-cert") pod "infra-operator-controller-manager-79d975b745-6ms2x" (UID: "4274f89e-0708-44ec-9bbb-3bb865c71d82") : secret "infra-operator-webhook-server-cert" not found Feb 16 11:26:44 crc kubenswrapper[4949]: W0216 11:26:44.816405 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfeaf7854_ba7b_4246_827d_941656a7f10b.slice/crio-798949d40a5d948abe4c6c96f77bccacf14fa2acbaf0beda0029cda234965094 WatchSource:0}: Error finding container 798949d40a5d948abe4c6c96f77bccacf14fa2acbaf0beda0029cda234965094: Status 404 returned error can't find the container with id 798949d40a5d948abe4c6c96f77bccacf14fa2acbaf0beda0029cda234965094 Feb 16 11:26:44 crc kubenswrapper[4949]: I0216 11:26:44.822199 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-b4d948c87-57rt6"] Feb 16 11:26:44 crc kubenswrapper[4949]: I0216 11:26:44.850655 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-64ddbf8bb-wsh6f"] Feb 16 11:26:44 crc kubenswrapper[4949]: I0216 11:26:44.870265 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8ac656c0-2570-4fd7-acbb-96182ff1bc95-metrics-certs\") pod \"openstack-operator-controller-manager-6b65fbbb9f-7j44g\" (UID: \"8ac656c0-2570-4fd7-acbb-96182ff1bc95\") " pod="openstack-operators/openstack-operator-controller-manager-6b65fbbb9f-7j44g" Feb 16 11:26:44 crc kubenswrapper[4949]: I0216 11:26:44.870404 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/8ac656c0-2570-4fd7-acbb-96182ff1bc95-webhook-certs\") pod \"openstack-operator-controller-manager-6b65fbbb9f-7j44g\" (UID: \"8ac656c0-2570-4fd7-acbb-96182ff1bc95\") " pod="openstack-operators/openstack-operator-controller-manager-6b65fbbb9f-7j44g" Feb 16 11:26:44 crc kubenswrapper[4949]: E0216 11:26:44.870727 4949 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Feb 16 11:26:44 crc kubenswrapper[4949]: E0216 11:26:44.870780 4949 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret 
"webhook-server-cert" not found Feb 16 11:26:44 crc kubenswrapper[4949]: E0216 11:26:44.870835 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8ac656c0-2570-4fd7-acbb-96182ff1bc95-metrics-certs podName:8ac656c0-2570-4fd7-acbb-96182ff1bc95 nodeName:}" failed. No retries permitted until 2026-02-16 11:26:45.870809137 +0000 UTC m=+1195.500143302 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/8ac656c0-2570-4fd7-acbb-96182ff1bc95-metrics-certs") pod "openstack-operator-controller-manager-6b65fbbb9f-7j44g" (UID: "8ac656c0-2570-4fd7-acbb-96182ff1bc95") : secret "metrics-server-cert" not found Feb 16 11:26:44 crc kubenswrapper[4949]: E0216 11:26:44.870876 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8ac656c0-2570-4fd7-acbb-96182ff1bc95-webhook-certs podName:8ac656c0-2570-4fd7-acbb-96182ff1bc95 nodeName:}" failed. No retries permitted until 2026-02-16 11:26:45.870845278 +0000 UTC m=+1195.500179443 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/8ac656c0-2570-4fd7-acbb-96182ff1bc95-webhook-certs") pod "openstack-operator-controller-manager-6b65fbbb9f-7j44g" (UID: "8ac656c0-2570-4fd7-acbb-96182ff1bc95") : secret "webhook-server-cert" not found Feb 16 11:26:44 crc kubenswrapper[4949]: I0216 11:26:44.875390 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5b9b8895d5-j7vgc"] Feb 16 11:26:44 crc kubenswrapper[4949]: I0216 11:26:44.980253 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-77987464f4-w59wt"] Feb 16 11:26:45 crc kubenswrapper[4949]: I0216 11:26:45.077738 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/f583e5c3-96ec-417a-8d47-541896c301fb-cert\") pod \"openstack-baremetal-operator-controller-manager-7c6767dc9clpgjd\" (UID: \"f583e5c3-96ec-417a-8d47-541896c301fb\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9clpgjd" Feb 16 11:26:45 crc kubenswrapper[4949]: E0216 11:26:45.077873 4949 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Feb 16 11:26:45 crc kubenswrapper[4949]: E0216 11:26:45.077952 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f583e5c3-96ec-417a-8d47-541896c301fb-cert podName:f583e5c3-96ec-417a-8d47-541896c301fb nodeName:}" failed. No retries permitted until 2026-02-16 11:26:47.077932852 +0000 UTC m=+1196.707267017 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/f583e5c3-96ec-417a-8d47-541896c301fb-cert") pod "openstack-baremetal-operator-controller-manager-7c6767dc9clpgjd" (UID: "f583e5c3-96ec-417a-8d47-541896c301fb") : secret "openstack-baremetal-operator-webhook-server-cert" not found Feb 16 11:26:45 crc kubenswrapper[4949]: I0216 11:26:45.202429 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-554564d7fc-d928p"] Feb 16 11:26:45 crc kubenswrapper[4949]: I0216 11:26:45.264826 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-6994f66f48-jv8z8"] Feb 16 11:26:45 crc kubenswrapper[4949]: W0216 11:26:45.290943 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podad558c16_93d3_4e12_8feb_56516d331bbe.slice/crio-442b40c9f5bfbb682dfcf75179959d3118ea1aa26b96af1d2cee84140cd92b65 WatchSource:0}: Error finding container 442b40c9f5bfbb682dfcf75179959d3118ea1aa26b96af1d2cee84140cd92b65: Status 404 returned error can't find the container with id 442b40c9f5bfbb682dfcf75179959d3118ea1aa26b96af1d2cee84140cd92b65 Feb 16 11:26:45 crc kubenswrapper[4949]: I0216 11:26:45.338771 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-69f49c598c-r82d9"] Feb 16 11:26:45 crc kubenswrapper[4949]: W0216 11:26:45.353936 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7927070f_ccfa_49c3_9a88_6fdee387c97c.slice/crio-ff523f018903bcee83af830d4bf603dc8b38b2d006483c496195eab2ccb72797 WatchSource:0}: Error finding container ff523f018903bcee83af830d4bf603dc8b38b2d006483c496195eab2ccb72797: Status 404 returned error can't find the container with id ff523f018903bcee83af830d4bf603dc8b38b2d006483c496195eab2ccb72797 Feb 16 11:26:45 crc kubenswrapper[4949]: I0216 11:26:45.455682 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-868647ff47-blpwl" event={"ID":"3608d840-be2f-478e-8252-e41f5480853a","Type":"ContainerStarted","Data":"b36727c4051d3dad9b14d504f8717834656890bf0990f7b31e9a913c107514d5"} Feb 16 11:26:45 crc kubenswrapper[4949]: I0216 11:26:45.457817 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5b9b8895d5-j7vgc" event={"ID":"6b381c9a-6963-419d-b96c-81ca6ea674d3","Type":"ContainerStarted","Data":"ead461d527a3fb24dd6d4c96bbb107119bfd297f520a278142f09c945cd1c72a"} Feb 16 11:26:45 crc kubenswrapper[4949]: I0216 11:26:45.461384 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-b4d948c87-57rt6" event={"ID":"feaf7854-ba7b-4246-827d-941656a7f10b","Type":"ContainerStarted","Data":"798949d40a5d948abe4c6c96f77bccacf14fa2acbaf0beda0029cda234965094"} Feb 16 11:26:45 crc kubenswrapper[4949]: I0216 11:26:45.462647 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-554564d7fc-d928p" event={"ID":"b81e3e22-4860-4765-9683-675c1fbbefef","Type":"ContainerStarted","Data":"135f33013270f52bcfead338232ba0753591332d0ce8156f89c2de10e838ae94"} Feb 16 11:26:45 crc kubenswrapper[4949]: I0216 11:26:45.463786 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/neutron-operator-controller-manager-64ddbf8bb-wsh6f" event={"ID":"70f95803-8857-4a40-b133-c12031e17f77","Type":"ContainerStarted","Data":"785022cf8eefe6268d643e659fcd6adfd2a7a6308126a9314a04fe962d3b6704"} Feb 16 11:26:45 crc kubenswrapper[4949]: I0216 11:26:45.465202 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-69f49c598c-r82d9" event={"ID":"7927070f-ccfa-49c3-9a88-6fdee387c97c","Type":"ContainerStarted","Data":"ff523f018903bcee83af830d4bf603dc8b38b2d006483c496195eab2ccb72797"} Feb 16 11:26:45 crc kubenswrapper[4949]: I0216 11:26:45.466373 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-77987464f4-w59wt" event={"ID":"482d88cb-1680-4276-8373-bf565231eadf","Type":"ContainerStarted","Data":"92898643af214cef975aaeb49c69f0f4aa095b4e0e325da2f8f1ef2e78267714"} Feb 16 11:26:45 crc kubenswrapper[4949]: I0216 11:26:45.467566 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-6994f66f48-jv8z8" event={"ID":"ad558c16-93d3-4e12-8feb-56516d331bbe","Type":"ContainerStarted","Data":"442b40c9f5bfbb682dfcf75179959d3118ea1aa26b96af1d2cee84140cd92b65"} Feb 16 11:26:45 crc kubenswrapper[4949]: I0216 11:26:45.896859 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8ac656c0-2570-4fd7-acbb-96182ff1bc95-metrics-certs\") pod \"openstack-operator-controller-manager-6b65fbbb9f-7j44g\" (UID: \"8ac656c0-2570-4fd7-acbb-96182ff1bc95\") " pod="openstack-operators/openstack-operator-controller-manager-6b65fbbb9f-7j44g" Feb 16 11:26:45 crc kubenswrapper[4949]: I0216 11:26:45.897546 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/8ac656c0-2570-4fd7-acbb-96182ff1bc95-webhook-certs\") pod \"openstack-operator-controller-manager-6b65fbbb9f-7j44g\" (UID: \"8ac656c0-2570-4fd7-acbb-96182ff1bc95\") " pod="openstack-operators/openstack-operator-controller-manager-6b65fbbb9f-7j44g" Feb 16 11:26:45 crc kubenswrapper[4949]: E0216 11:26:45.897934 4949 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Feb 16 11:26:45 crc kubenswrapper[4949]: E0216 11:26:45.898039 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8ac656c0-2570-4fd7-acbb-96182ff1bc95-webhook-certs podName:8ac656c0-2570-4fd7-acbb-96182ff1bc95 nodeName:}" failed. No retries permitted until 2026-02-16 11:26:47.897999933 +0000 UTC m=+1197.527334098 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/8ac656c0-2570-4fd7-acbb-96182ff1bc95-webhook-certs") pod "openstack-operator-controller-manager-6b65fbbb9f-7j44g" (UID: "8ac656c0-2570-4fd7-acbb-96182ff1bc95") : secret "webhook-server-cert" not found Feb 16 11:26:45 crc kubenswrapper[4949]: E0216 11:26:45.898530 4949 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Feb 16 11:26:45 crc kubenswrapper[4949]: E0216 11:26:45.898557 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8ac656c0-2570-4fd7-acbb-96182ff1bc95-metrics-certs podName:8ac656c0-2570-4fd7-acbb-96182ff1bc95 nodeName:}" failed. 
No retries permitted until 2026-02-16 11:26:47.898548759 +0000 UTC m=+1197.527882924 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/8ac656c0-2570-4fd7-acbb-96182ff1bc95-metrics-certs") pod "openstack-operator-controller-manager-6b65fbbb9f-7j44g" (UID: "8ac656c0-2570-4fd7-acbb-96182ff1bc95") : secret "metrics-server-cert" not found Feb 16 11:26:45 crc kubenswrapper[4949]: I0216 11:26:45.937218 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-d44cf6b75-vsknd"] Feb 16 11:26:45 crc kubenswrapper[4949]: I0216 11:26:45.972656 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-7866795846-f4l9p"] Feb 16 11:26:46 crc kubenswrapper[4949]: I0216 11:26:46.009350 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-567668f5cf-cx2cw"] Feb 16 11:26:46 crc kubenswrapper[4949]: I0216 11:26:46.120160 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-8497b45c89-dsfbf"] Feb 16 11:26:46 crc kubenswrapper[4949]: I0216 11:26:46.157243 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-68f46476f-dk4s8"] Feb 16 11:26:46 crc kubenswrapper[4949]: I0216 11:26:46.237406 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-69f8888797-4bjjf"] Feb 16 11:26:46 crc kubenswrapper[4949]: I0216 11:26:46.251438 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-64b85768bb-85t96"] Feb 16 11:26:46 crc kubenswrapper[4949]: I0216 11:26:46.265336 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-54f6768c69-lzkxk"] Feb 16 11:26:46 crc kubenswrapper[4949]: I0216 11:26:46.272790 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-5db88f68c-kwgzs"] Feb 16 11:26:46 crc kubenswrapper[4949]: I0216 11:26:46.289739 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-p9cl5"] Feb 16 11:26:46 crc kubenswrapper[4949]: E0216 11:26:46.291750 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-ndgjh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-p9cl5_openstack-operators(9a87a761-d2b8-4202-98c2-391fdb512cc4): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Feb 16 11:26:46 crc kubenswrapper[4949]: E0216 11:26:46.294262 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-p9cl5" podUID="9a87a761-d2b8-4202-98c2-391fdb512cc4" Feb 16 11:26:46 crc kubenswrapper[4949]: I0216 11:26:46.485302 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-p9cl5" event={"ID":"9a87a761-d2b8-4202-98c2-391fdb512cc4","Type":"ContainerStarted","Data":"a91bd9e84bd8b04119f7cc05d56a0e22f0ebb374d746e00036b003fd9d57c346"} Feb 16 11:26:46 crc kubenswrapper[4949]: E0216 11:26:46.489034 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-p9cl5" podUID="9a87a761-d2b8-4202-98c2-391fdb512cc4" Feb 16 11:26:46 crc kubenswrapper[4949]: I0216 11:26:46.489820 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-d44cf6b75-vsknd" event={"ID":"c179d330-538e-4fc6-afeb-bc3bfdf92569","Type":"ContainerStarted","Data":"663c0d674a2092aee7d64548c8457bf98107fd4cc82a80bd5ada9e9c1a7c8ca9"} Feb 16 11:26:46 crc kubenswrapper[4949]: I0216 11:26:46.498869 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-567668f5cf-cx2cw" event={"ID":"20c60056-65e4-486e-8b5e-bf7aef44b9bc","Type":"ContainerStarted","Data":"a30f6a771042ae61a88ce62a151f54a415740a83259d0eac4fc9233bf88cfbcd"} Feb 16 11:26:46 crc kubenswrapper[4949]: I0216 11:26:46.501724 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-69f8888797-4bjjf" event={"ID":"d1e2149a-cad2-4e22-822c-d5afb8294a25","Type":"ContainerStarted","Data":"e8d025d754a4695c9b2301271f408dc1a807050d25c9c7a144fb6aef43ba1e24"} Feb 16 11:26:46 crc kubenswrapper[4949]: I0216 11:26:46.507048 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-7866795846-f4l9p" 
event={"ID":"10670483-76f3-4774-9aa4-a0c21ff5799e","Type":"ContainerStarted","Data":"eb0698600f1512cebd180634378a79c077384ee40040aa3e3a49c1065e1cc1b5"} Feb 16 11:26:46 crc kubenswrapper[4949]: I0216 11:26:46.509805 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-54f6768c69-lzkxk" event={"ID":"6c160a96-7508-45af-82b7-37ca399bb5af","Type":"ContainerStarted","Data":"5299abac8755f8587f64604591079c468524b9f20b4bfcb03a5b4c2d8ec3a486"} Feb 16 11:26:46 crc kubenswrapper[4949]: I0216 11:26:46.511888 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-64b85768bb-85t96" event={"ID":"6c8f8a0e-a378-4b98-8c9b-e5180c97e088","Type":"ContainerStarted","Data":"ed5c408d203c498f8b335189e258d6280348cb945428a140c45d726895e21fef"} Feb 16 11:26:46 crc kubenswrapper[4949]: I0216 11:26:46.516521 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-5db88f68c-kwgzs" event={"ID":"06428bd6-f2fd-44ab-b71a-fe48ec54189d","Type":"ContainerStarted","Data":"5faf287e8fdbdccfe2b9a8342cdd32149e9ea2a0ebd0b0dd19b8457af5342b50"} Feb 16 11:26:46 crc kubenswrapper[4949]: I0216 11:26:46.519557 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-68f46476f-dk4s8" event={"ID":"f6c8332e-9a8f-44dc-ac2b-8180c68d8f0f","Type":"ContainerStarted","Data":"3442d492fa3b11e274fac4be91085b96c3291e3fb639aa7b00383e5ce6b07d42"} Feb 16 11:26:46 crc kubenswrapper[4949]: I0216 11:26:46.522494 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-8497b45c89-dsfbf" event={"ID":"6f3a40af-b9a5-4709-8173-fb62a0d053e8","Type":"ContainerStarted","Data":"17c956d7fe0153eefcaea2fe7e83c296ec3bce88d7be533ac01077b2f063662a"} Feb 16 11:26:46 crc kubenswrapper[4949]: I0216 11:26:46.727275 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4274f89e-0708-44ec-9bbb-3bb865c71d82-cert\") pod \"infra-operator-controller-manager-79d975b745-6ms2x\" (UID: \"4274f89e-0708-44ec-9bbb-3bb865c71d82\") " pod="openstack-operators/infra-operator-controller-manager-79d975b745-6ms2x" Feb 16 11:26:46 crc kubenswrapper[4949]: E0216 11:26:46.727448 4949 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Feb 16 11:26:46 crc kubenswrapper[4949]: E0216 11:26:46.729104 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4274f89e-0708-44ec-9bbb-3bb865c71d82-cert podName:4274f89e-0708-44ec-9bbb-3bb865c71d82 nodeName:}" failed. No retries permitted until 2026-02-16 11:26:50.729039638 +0000 UTC m=+1200.358373853 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/4274f89e-0708-44ec-9bbb-3bb865c71d82-cert") pod "infra-operator-controller-manager-79d975b745-6ms2x" (UID: "4274f89e-0708-44ec-9bbb-3bb865c71d82") : secret "infra-operator-webhook-server-cert" not found Feb 16 11:26:47 crc kubenswrapper[4949]: E0216 11:26:47.141073 4949 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Feb 16 11:26:47 crc kubenswrapper[4949]: E0216 11:26:47.142153 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f583e5c3-96ec-417a-8d47-541896c301fb-cert podName:f583e5c3-96ec-417a-8d47-541896c301fb nodeName:}" failed. No retries permitted until 2026-02-16 11:26:51.141631921 +0000 UTC m=+1200.770966086 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/f583e5c3-96ec-417a-8d47-541896c301fb-cert") pod "openstack-baremetal-operator-controller-manager-7c6767dc9clpgjd" (UID: "f583e5c3-96ec-417a-8d47-541896c301fb") : secret "openstack-baremetal-operator-webhook-server-cert" not found Feb 16 11:26:47 crc kubenswrapper[4949]: I0216 11:26:47.140849 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/f583e5c3-96ec-417a-8d47-541896c301fb-cert\") pod \"openstack-baremetal-operator-controller-manager-7c6767dc9clpgjd\" (UID: \"f583e5c3-96ec-417a-8d47-541896c301fb\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9clpgjd" Feb 16 11:26:47 crc kubenswrapper[4949]: E0216 11:26:47.648258 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-p9cl5" podUID="9a87a761-d2b8-4202-98c2-391fdb512cc4" Feb 16 11:26:47 crc kubenswrapper[4949]: I0216 11:26:47.971568 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8ac656c0-2570-4fd7-acbb-96182ff1bc95-metrics-certs\") pod \"openstack-operator-controller-manager-6b65fbbb9f-7j44g\" (UID: \"8ac656c0-2570-4fd7-acbb-96182ff1bc95\") " pod="openstack-operators/openstack-operator-controller-manager-6b65fbbb9f-7j44g" Feb 16 11:26:47 crc kubenswrapper[4949]: I0216 11:26:47.971799 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/8ac656c0-2570-4fd7-acbb-96182ff1bc95-webhook-certs\") pod \"openstack-operator-controller-manager-6b65fbbb9f-7j44g\" (UID: \"8ac656c0-2570-4fd7-acbb-96182ff1bc95\") " pod="openstack-operators/openstack-operator-controller-manager-6b65fbbb9f-7j44g" Feb 16 11:26:47 crc kubenswrapper[4949]: E0216 11:26:47.972192 4949 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Feb 16 11:26:47 crc kubenswrapper[4949]: E0216 11:26:47.972274 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8ac656c0-2570-4fd7-acbb-96182ff1bc95-webhook-certs podName:8ac656c0-2570-4fd7-acbb-96182ff1bc95 nodeName:}" failed. 
No retries permitted until 2026-02-16 11:26:51.972247683 +0000 UTC m=+1201.601581848 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/8ac656c0-2570-4fd7-acbb-96182ff1bc95-webhook-certs") pod "openstack-operator-controller-manager-6b65fbbb9f-7j44g" (UID: "8ac656c0-2570-4fd7-acbb-96182ff1bc95") : secret "webhook-server-cert" not found Feb 16 11:26:47 crc kubenswrapper[4949]: E0216 11:26:47.972742 4949 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Feb 16 11:26:47 crc kubenswrapper[4949]: E0216 11:26:47.972786 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8ac656c0-2570-4fd7-acbb-96182ff1bc95-metrics-certs podName:8ac656c0-2570-4fd7-acbb-96182ff1bc95 nodeName:}" failed. No retries permitted until 2026-02-16 11:26:51.972773538 +0000 UTC m=+1201.602107713 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/8ac656c0-2570-4fd7-acbb-96182ff1bc95-metrics-certs") pod "openstack-operator-controller-manager-6b65fbbb9f-7j44g" (UID: "8ac656c0-2570-4fd7-acbb-96182ff1bc95") : secret "metrics-server-cert" not found Feb 16 11:26:50 crc kubenswrapper[4949]: I0216 11:26:50.749772 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4274f89e-0708-44ec-9bbb-3bb865c71d82-cert\") pod \"infra-operator-controller-manager-79d975b745-6ms2x\" (UID: \"4274f89e-0708-44ec-9bbb-3bb865c71d82\") " pod="openstack-operators/infra-operator-controller-manager-79d975b745-6ms2x" Feb 16 11:26:50 crc kubenswrapper[4949]: E0216 11:26:50.750211 4949 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Feb 16 11:26:50 crc kubenswrapper[4949]: E0216 11:26:50.750479 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4274f89e-0708-44ec-9bbb-3bb865c71d82-cert podName:4274f89e-0708-44ec-9bbb-3bb865c71d82 nodeName:}" failed. No retries permitted until 2026-02-16 11:26:58.750446479 +0000 UTC m=+1208.379780654 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/4274f89e-0708-44ec-9bbb-3bb865c71d82-cert") pod "infra-operator-controller-manager-79d975b745-6ms2x" (UID: "4274f89e-0708-44ec-9bbb-3bb865c71d82") : secret "infra-operator-webhook-server-cert" not found Feb 16 11:26:51 crc kubenswrapper[4949]: I0216 11:26:51.161709 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/f583e5c3-96ec-417a-8d47-541896c301fb-cert\") pod \"openstack-baremetal-operator-controller-manager-7c6767dc9clpgjd\" (UID: \"f583e5c3-96ec-417a-8d47-541896c301fb\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9clpgjd" Feb 16 11:26:51 crc kubenswrapper[4949]: E0216 11:26:51.161911 4949 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Feb 16 11:26:51 crc kubenswrapper[4949]: E0216 11:26:51.161999 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f583e5c3-96ec-417a-8d47-541896c301fb-cert podName:f583e5c3-96ec-417a-8d47-541896c301fb nodeName:}" failed. 
No retries permitted until 2026-02-16 11:26:59.161977893 +0000 UTC m=+1208.791312058 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/f583e5c3-96ec-417a-8d47-541896c301fb-cert") pod "openstack-baremetal-operator-controller-manager-7c6767dc9clpgjd" (UID: "f583e5c3-96ec-417a-8d47-541896c301fb") : secret "openstack-baremetal-operator-webhook-server-cert" not found Feb 16 11:26:52 crc kubenswrapper[4949]: I0216 11:26:52.018494 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8ac656c0-2570-4fd7-acbb-96182ff1bc95-metrics-certs\") pod \"openstack-operator-controller-manager-6b65fbbb9f-7j44g\" (UID: \"8ac656c0-2570-4fd7-acbb-96182ff1bc95\") " pod="openstack-operators/openstack-operator-controller-manager-6b65fbbb9f-7j44g" Feb 16 11:26:52 crc kubenswrapper[4949]: I0216 11:26:52.019047 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/8ac656c0-2570-4fd7-acbb-96182ff1bc95-webhook-certs\") pod \"openstack-operator-controller-manager-6b65fbbb9f-7j44g\" (UID: \"8ac656c0-2570-4fd7-acbb-96182ff1bc95\") " pod="openstack-operators/openstack-operator-controller-manager-6b65fbbb9f-7j44g" Feb 16 11:26:52 crc kubenswrapper[4949]: E0216 11:26:52.019449 4949 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Feb 16 11:26:52 crc kubenswrapper[4949]: E0216 11:26:52.019506 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8ac656c0-2570-4fd7-acbb-96182ff1bc95-webhook-certs podName:8ac656c0-2570-4fd7-acbb-96182ff1bc95 nodeName:}" failed. No retries permitted until 2026-02-16 11:27:00.019487921 +0000 UTC m=+1209.648822086 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/8ac656c0-2570-4fd7-acbb-96182ff1bc95-webhook-certs") pod "openstack-operator-controller-manager-6b65fbbb9f-7j44g" (UID: "8ac656c0-2570-4fd7-acbb-96182ff1bc95") : secret "webhook-server-cert" not found Feb 16 11:26:52 crc kubenswrapper[4949]: E0216 11:26:52.019877 4949 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Feb 16 11:26:52 crc kubenswrapper[4949]: E0216 11:26:52.019902 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8ac656c0-2570-4fd7-acbb-96182ff1bc95-metrics-certs podName:8ac656c0-2570-4fd7-acbb-96182ff1bc95 nodeName:}" failed. No retries permitted until 2026-02-16 11:27:00.019894572 +0000 UTC m=+1209.649228737 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/8ac656c0-2570-4fd7-acbb-96182ff1bc95-metrics-certs") pod "openstack-operator-controller-manager-6b65fbbb9f-7j44g" (UID: "8ac656c0-2570-4fd7-acbb-96182ff1bc95") : secret "metrics-server-cert" not found Feb 16 11:26:58 crc kubenswrapper[4949]: I0216 11:26:58.826133 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4274f89e-0708-44ec-9bbb-3bb865c71d82-cert\") pod \"infra-operator-controller-manager-79d975b745-6ms2x\" (UID: \"4274f89e-0708-44ec-9bbb-3bb865c71d82\") " pod="openstack-operators/infra-operator-controller-manager-79d975b745-6ms2x" Feb 16 11:26:58 crc kubenswrapper[4949]: E0216 11:26:58.826393 4949 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Feb 16 11:26:58 crc kubenswrapper[4949]: E0216 11:26:58.826883 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4274f89e-0708-44ec-9bbb-3bb865c71d82-cert podName:4274f89e-0708-44ec-9bbb-3bb865c71d82 nodeName:}" failed. No retries permitted until 2026-02-16 11:27:14.82686181 +0000 UTC m=+1224.456195975 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/4274f89e-0708-44ec-9bbb-3bb865c71d82-cert") pod "infra-operator-controller-manager-79d975b745-6ms2x" (UID: "4274f89e-0708-44ec-9bbb-3bb865c71d82") : secret "infra-operator-webhook-server-cert" not found Feb 16 11:26:59 crc kubenswrapper[4949]: I0216 11:26:59.234513 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/f583e5c3-96ec-417a-8d47-541896c301fb-cert\") pod \"openstack-baremetal-operator-controller-manager-7c6767dc9clpgjd\" (UID: \"f583e5c3-96ec-417a-8d47-541896c301fb\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9clpgjd" Feb 16 11:26:59 crc kubenswrapper[4949]: I0216 11:26:59.258407 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/f583e5c3-96ec-417a-8d47-541896c301fb-cert\") pod \"openstack-baremetal-operator-controller-manager-7c6767dc9clpgjd\" (UID: \"f583e5c3-96ec-417a-8d47-541896c301fb\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9clpgjd" Feb 16 11:26:59 crc kubenswrapper[4949]: I0216 11:26:59.433073 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-zms9v" Feb 16 11:26:59 crc kubenswrapper[4949]: I0216 11:26:59.441299 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9clpgjd" Feb 16 11:27:00 crc kubenswrapper[4949]: I0216 11:27:00.059031 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8ac656c0-2570-4fd7-acbb-96182ff1bc95-metrics-certs\") pod \"openstack-operator-controller-manager-6b65fbbb9f-7j44g\" (UID: \"8ac656c0-2570-4fd7-acbb-96182ff1bc95\") " pod="openstack-operators/openstack-operator-controller-manager-6b65fbbb9f-7j44g" Feb 16 11:27:00 crc kubenswrapper[4949]: I0216 11:27:00.059541 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/8ac656c0-2570-4fd7-acbb-96182ff1bc95-webhook-certs\") pod \"openstack-operator-controller-manager-6b65fbbb9f-7j44g\" (UID: \"8ac656c0-2570-4fd7-acbb-96182ff1bc95\") " pod="openstack-operators/openstack-operator-controller-manager-6b65fbbb9f-7j44g" Feb 16 11:27:00 crc kubenswrapper[4949]: E0216 11:27:00.059813 4949 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Feb 16 11:27:00 crc kubenswrapper[4949]: E0216 11:27:00.059868 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8ac656c0-2570-4fd7-acbb-96182ff1bc95-webhook-certs podName:8ac656c0-2570-4fd7-acbb-96182ff1bc95 nodeName:}" failed. No retries permitted until 2026-02-16 11:27:16.059854285 +0000 UTC m=+1225.689188450 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/8ac656c0-2570-4fd7-acbb-96182ff1bc95-webhook-certs") pod "openstack-operator-controller-manager-6b65fbbb9f-7j44g" (UID: "8ac656c0-2570-4fd7-acbb-96182ff1bc95") : secret "webhook-server-cert" not found Feb 16 11:27:00 crc kubenswrapper[4949]: I0216 11:27:00.067206 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8ac656c0-2570-4fd7-acbb-96182ff1bc95-metrics-certs\") pod \"openstack-operator-controller-manager-6b65fbbb9f-7j44g\" (UID: \"8ac656c0-2570-4fd7-acbb-96182ff1bc95\") " pod="openstack-operators/openstack-operator-controller-manager-6b65fbbb9f-7j44g" Feb 16 11:27:00 crc kubenswrapper[4949]: E0216 11:27:00.542723 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/cinder-operator@sha256:2b8ab3063af4aaeed0198197aae6f391c6647ac686c94c85668537f1d5933979" Feb 16 11:27:00 crc kubenswrapper[4949]: E0216 11:27:00.543038 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/cinder-operator@sha256:2b8ab3063af4aaeed0198197aae6f391c6647ac686c94c85668537f1d5933979,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-4l6k2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-operator-controller-manager-5d946d989d-7ndss_openstack-operators(14ac83e9-3142-4e62-b2a5-789822ea3013): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 16 11:27:00 crc kubenswrapper[4949]: E0216 11:27:00.544233 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/cinder-operator-controller-manager-5d946d989d-7ndss" podUID="14ac83e9-3142-4e62-b2a5-789822ea3013" Feb 16 11:27:01 crc kubenswrapper[4949]: E0216 11:27:01.002099 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/cinder-operator@sha256:2b8ab3063af4aaeed0198197aae6f391c6647ac686c94c85668537f1d5933979\\\"\"" pod="openstack-operators/cinder-operator-controller-manager-5d946d989d-7ndss" podUID="14ac83e9-3142-4e62-b2a5-789822ea3013" Feb 16 11:27:02 crc kubenswrapper[4949]: E0216 11:27:02.337931 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/glance-operator@sha256:1ab3ec59cd8e30dd8423e91ad832403bdefbae3b8ac47e15578d5a677d7ba0df" Feb 16 11:27:02 crc kubenswrapper[4949]: E0216 11:27:02.339237 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/glance-operator@sha256:1ab3ec59cd8e30dd8423e91ad832403bdefbae3b8ac47e15578d5a677d7ba0df,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-b7nn4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-operator-controller-manager-77987464f4-w59wt_openstack-operators(482d88cb-1680-4276-8373-bf565231eadf): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 16 11:27:02 crc kubenswrapper[4949]: E0216 11:27:02.340479 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/glance-operator-controller-manager-77987464f4-w59wt" podUID="482d88cb-1680-4276-8373-bf565231eadf" Feb 16 11:27:03 crc kubenswrapper[4949]: E0216 11:27:03.019382 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/glance-operator@sha256:1ab3ec59cd8e30dd8423e91ad832403bdefbae3b8ac47e15578d5a677d7ba0df\\\"\"" pod="openstack-operators/glance-operator-controller-manager-77987464f4-w59wt" podUID="482d88cb-1680-4276-8373-bf565231eadf" Feb 16 11:27:03 crc kubenswrapper[4949]: E0216 11:27:03.161012 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/watcher-operator@sha256:d01ae848290e880c09127d5297418dea40fc7f090fdab9bf2c578c7e7f53aec0" Feb 16 11:27:03 crc kubenswrapper[4949]: E0216 11:27:03.161297 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:d01ae848290e880c09127d5297418dea40fc7f090fdab9bf2c578c7e7f53aec0,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6vx6w,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-5db88f68c-kwgzs_openstack-operators(06428bd6-f2fd-44ab-b71a-fe48ec54189d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 16 11:27:03 crc kubenswrapper[4949]: E0216 11:27:03.162934 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/watcher-operator-controller-manager-5db88f68c-kwgzs" podUID="06428bd6-f2fd-44ab-b71a-fe48ec54189d" Feb 16 11:27:03 crc kubenswrapper[4949]: E0216 11:27:03.827835 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/octavia-operator@sha256:229fc8c8d94dd4102d2151cd4ec1eaaa09d897c2b396d06e903f61ea29c1fa34" Feb 16 11:27:03 crc kubenswrapper[4949]: E0216 11:27:03.828554 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:229fc8c8d94dd4102d2151cd4ec1eaaa09d897c2b396d06e903f61ea29c1fa34,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-phs77,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-69f8888797-4bjjf_openstack-operators(d1e2149a-cad2-4e22-822c-d5afb8294a25): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 16 11:27:03 crc kubenswrapper[4949]: E0216 11:27:03.830271 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/octavia-operator-controller-manager-69f8888797-4bjjf" podUID="d1e2149a-cad2-4e22-822c-d5afb8294a25" Feb 16 11:27:04 crc kubenswrapper[4949]: E0216 11:27:04.029100 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:d01ae848290e880c09127d5297418dea40fc7f090fdab9bf2c578c7e7f53aec0\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-5db88f68c-kwgzs" podUID="06428bd6-f2fd-44ab-b71a-fe48ec54189d" Feb 16 11:27:04 crc kubenswrapper[4949]: E0216 11:27:04.029380 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:229fc8c8d94dd4102d2151cd4ec1eaaa09d897c2b396d06e903f61ea29c1fa34\\\"\"" pod="openstack-operators/octavia-operator-controller-manager-69f8888797-4bjjf" 
podUID="d1e2149a-cad2-4e22-822c-d5afb8294a25" Feb 16 11:27:04 crc kubenswrapper[4949]: I0216 11:27:04.550297 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 16 11:27:04 crc kubenswrapper[4949]: I0216 11:27:04.550402 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 16 11:27:04 crc kubenswrapper[4949]: I0216 11:27:04.550506 4949 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-26lss" Feb 16 11:27:04 crc kubenswrapper[4949]: I0216 11:27:04.551853 4949 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"81d321ae3cf8ae54ff452597f9576b0c0a3cd11dc176e9a1b231f17a43bc97b9"} pod="openshift-machine-config-operator/machine-config-daemon-26lss" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 16 11:27:04 crc kubenswrapper[4949]: I0216 11:27:04.551935 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" containerID="cri-o://81d321ae3cf8ae54ff452597f9576b0c0a3cd11dc176e9a1b231f17a43bc97b9" gracePeriod=600 Feb 16 11:27:04 crc kubenswrapper[4949]: E0216 11:27:04.591602 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/placement-operator@sha256:a57336b9f95b703f80453db87e43a2834ca1bdc89480796d28ebbe0a9702ecfd" Feb 16 11:27:04 crc kubenswrapper[4949]: E0216 11:27:04.591911 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:a57336b9f95b703f80453db87e43a2834ca1bdc89480796d28ebbe0a9702ecfd,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-5nrpf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-8497b45c89-dsfbf_openstack-operators(6f3a40af-b9a5-4709-8173-fb62a0d053e8): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 16 11:27:04 crc kubenswrapper[4949]: E0216 11:27:04.594163 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/placement-operator-controller-manager-8497b45c89-dsfbf" podUID="6f3a40af-b9a5-4709-8173-fb62a0d053e8" Feb 16 11:27:05 crc kubenswrapper[4949]: I0216 11:27:05.040823 4949 generic.go:334] "Generic (PLEG): container finished" podID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerID="81d321ae3cf8ae54ff452597f9576b0c0a3cd11dc176e9a1b231f17a43bc97b9" exitCode=0 Feb 16 11:27:05 crc kubenswrapper[4949]: I0216 11:27:05.040934 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" event={"ID":"39ca5ab7-457c-4404-a3eb-f6acce74843b","Type":"ContainerDied","Data":"81d321ae3cf8ae54ff452597f9576b0c0a3cd11dc176e9a1b231f17a43bc97b9"} Feb 16 11:27:05 crc kubenswrapper[4949]: I0216 11:27:05.041024 4949 scope.go:117] "RemoveContainer" containerID="3be1216f0de04908b66655ea21e2d3a0e3a372ff9aac95cc621972831b9f6c40" Feb 16 11:27:05 crc kubenswrapper[4949]: E0216 11:27:05.043997 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:a57336b9f95b703f80453db87e43a2834ca1bdc89480796d28ebbe0a9702ecfd\\\"\"" pod="openstack-operators/placement-operator-controller-manager-8497b45c89-dsfbf" podUID="6f3a40af-b9a5-4709-8173-fb62a0d053e8" Feb 16 11:27:05 crc kubenswrapper[4949]: E0216 11:27:05.500523 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/barbican-operator@sha256:90ad8fd8c1889b6be77925016532218eb6149d2c1c8535a5f9f1775c776fa6cc" Feb 16 11:27:05 crc kubenswrapper[4949]: E0216 11:27:05.500903 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:manager,Image:quay.io/openstack-k8s-operators/barbican-operator@sha256:90ad8fd8c1889b6be77925016532218eb6149d2c1c8535a5f9f1775c776fa6cc,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jtztn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-operator-controller-manager-868647ff47-blpwl_openstack-operators(3608d840-be2f-478e-8252-e41f5480853a): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 16 11:27:05 crc kubenswrapper[4949]: E0216 11:27:05.502717 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/barbican-operator-controller-manager-868647ff47-blpwl" podUID="3608d840-be2f-478e-8252-e41f5480853a" Feb 16 11:27:06 crc kubenswrapper[4949]: E0216 11:27:06.198069 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/barbican-operator@sha256:90ad8fd8c1889b6be77925016532218eb6149d2c1c8535a5f9f1775c776fa6cc\\\"\"" pod="openstack-operators/barbican-operator-controller-manager-868647ff47-blpwl" podUID="3608d840-be2f-478e-8252-e41f5480853a" Feb 16 11:27:08 crc kubenswrapper[4949]: E0216 11:27:08.099561 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" 
image="quay.io/openstack-k8s-operators/swift-operator@sha256:3d676f1281e24ef07de617570d2f7fbf625032e41866d1551a856c052248bb04" Feb 16 11:27:08 crc kubenswrapper[4949]: E0216 11:27:08.099930 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:3d676f1281e24ef07de617570d2f7fbf625032e41866d1551a856c052248bb04,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xs4jm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-68f46476f-dk4s8_openstack-operators(f6c8332e-9a8f-44dc-ac2b-8180c68d8f0f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 16 11:27:08 crc kubenswrapper[4949]: E0216 11:27:08.101199 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/swift-operator-controller-manager-68f46476f-dk4s8" podUID="f6c8332e-9a8f-44dc-ac2b-8180c68d8f0f" Feb 16 11:27:08 crc kubenswrapper[4949]: E0216 11:27:08.216096 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:3d676f1281e24ef07de617570d2f7fbf625032e41866d1551a856c052248bb04\\\"\"" pod="openstack-operators/swift-operator-controller-manager-68f46476f-dk4s8" 
podUID="f6c8332e-9a8f-44dc-ac2b-8180c68d8f0f" Feb 16 11:27:08 crc kubenswrapper[4949]: E0216 11:27:08.655740 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/test-operator@sha256:f0fabdf79095def0f8b1c0442925548a94ca94bed4de2d3b171277129f8079e6" Feb 16 11:27:08 crc kubenswrapper[4949]: E0216 11:27:08.655949 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:f0fabdf79095def0f8b1c0442925548a94ca94bed4de2d3b171277129f8079e6,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-7l8hm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-7866795846-f4l9p_openstack-operators(10670483-76f3-4774-9aa4-a0c21ff5799e): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 16 11:27:08 crc kubenswrapper[4949]: E0216 11:27:08.657144 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/test-operator-controller-manager-7866795846-f4l9p" podUID="10670483-76f3-4774-9aa4-a0c21ff5799e" Feb 16 11:27:09 crc kubenswrapper[4949]: E0216 11:27:09.222918 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.io/openstack-k8s-operators/test-operator@sha256:f0fabdf79095def0f8b1c0442925548a94ca94bed4de2d3b171277129f8079e6\\\"\"" pod="openstack-operators/test-operator-controller-manager-7866795846-f4l9p" podUID="10670483-76f3-4774-9aa4-a0c21ff5799e" Feb 16 11:27:09 crc kubenswrapper[4949]: E0216 11:27:09.468752 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/ironic-operator@sha256:7e1b0b7b172ad0d707ab80dd72d609e1d0f5bbd38a22c24a28ed0f17a960c867" Feb 16 11:27:09 crc kubenswrapper[4949]: E0216 11:27:09.469055 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ironic-operator@sha256:7e1b0b7b172ad0d707ab80dd72d609e1d0f5bbd38a22c24a28ed0f17a960c867,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-lwx85,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ironic-operator-controller-manager-554564d7fc-d928p_openstack-operators(b81e3e22-4860-4765-9683-675c1fbbefef): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 16 11:27:09 crc kubenswrapper[4949]: E0216 11:27:09.470270 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ironic-operator-controller-manager-554564d7fc-d928p" podUID="b81e3e22-4860-4765-9683-675c1fbbefef" Feb 16 
11:27:10 crc kubenswrapper[4949]: E0216 11:27:10.033000 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/mariadb-operator@sha256:a18f12497b7159b100fcfd72c7ba2273d0669a5c00600a9ff1333bca028f256a" Feb 16 11:27:10 crc kubenswrapper[4949]: E0216 11:27:10.033635 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/mariadb-operator@sha256:a18f12497b7159b100fcfd72c7ba2273d0669a5c00600a9ff1333bca028f256a,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-22mpv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod mariadb-operator-controller-manager-6994f66f48-jv8z8_openstack-operators(ad558c16-93d3-4e12-8feb-56516d331bbe): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 16 11:27:10 crc kubenswrapper[4949]: E0216 11:27:10.034860 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/mariadb-operator-controller-manager-6994f66f48-jv8z8" podUID="ad558c16-93d3-4e12-8feb-56516d331bbe" Feb 16 11:27:10 crc kubenswrapper[4949]: E0216 11:27:10.233268 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.io/openstack-k8s-operators/ironic-operator@sha256:7e1b0b7b172ad0d707ab80dd72d609e1d0f5bbd38a22c24a28ed0f17a960c867\\\"\"" pod="openstack-operators/ironic-operator-controller-manager-554564d7fc-d928p" podUID="b81e3e22-4860-4765-9683-675c1fbbefef" Feb 16 11:27:10 crc kubenswrapper[4949]: E0216 11:27:10.236796 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/mariadb-operator@sha256:a18f12497b7159b100fcfd72c7ba2273d0669a5c00600a9ff1333bca028f256a\\\"\"" pod="openstack-operators/mariadb-operator-controller-manager-6994f66f48-jv8z8" podUID="ad558c16-93d3-4e12-8feb-56516d331bbe" Feb 16 11:27:15 crc kubenswrapper[4949]: I0216 11:27:15.089402 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4274f89e-0708-44ec-9bbb-3bb865c71d82-cert\") pod \"infra-operator-controller-manager-79d975b745-6ms2x\" (UID: \"4274f89e-0708-44ec-9bbb-3bb865c71d82\") " pod="openstack-operators/infra-operator-controller-manager-79d975b745-6ms2x" Feb 16 11:27:15 crc kubenswrapper[4949]: I0216 11:27:15.141087 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4274f89e-0708-44ec-9bbb-3bb865c71d82-cert\") pod \"infra-operator-controller-manager-79d975b745-6ms2x\" (UID: \"4274f89e-0708-44ec-9bbb-3bb865c71d82\") " pod="openstack-operators/infra-operator-controller-manager-79d975b745-6ms2x" Feb 16 11:27:15 crc kubenswrapper[4949]: I0216 11:27:15.167694 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-pjgjq" Feb 16 11:27:15 crc kubenswrapper[4949]: I0216 11:27:15.176627 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-79d975b745-6ms2x" Feb 16 11:27:16 crc kubenswrapper[4949]: I0216 11:27:16.268412 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/8ac656c0-2570-4fd7-acbb-96182ff1bc95-webhook-certs\") pod \"openstack-operator-controller-manager-6b65fbbb9f-7j44g\" (UID: \"8ac656c0-2570-4fd7-acbb-96182ff1bc95\") " pod="openstack-operators/openstack-operator-controller-manager-6b65fbbb9f-7j44g" Feb 16 11:27:16 crc kubenswrapper[4949]: I0216 11:27:16.296334 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/8ac656c0-2570-4fd7-acbb-96182ff1bc95-webhook-certs\") pod \"openstack-operator-controller-manager-6b65fbbb9f-7j44g\" (UID: \"8ac656c0-2570-4fd7-acbb-96182ff1bc95\") " pod="openstack-operators/openstack-operator-controller-manager-6b65fbbb9f-7j44g" Feb 16 11:27:16 crc kubenswrapper[4949]: I0216 11:27:16.420682 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-fvhp2" Feb 16 11:27:16 crc kubenswrapper[4949]: I0216 11:27:16.427446 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-6b65fbbb9f-7j44g" Feb 16 11:27:20 crc kubenswrapper[4949]: E0216 11:27:20.063595 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/nova-operator@sha256:fe85dd595906fac0fe1e7a42215bb306a963cf87d55e07cd2573726b690b2838" Feb 16 11:27:20 crc kubenswrapper[4949]: E0216 11:27:20.064464 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:fe85dd595906fac0fe1e7a42215bb306a963cf87d55e07cd2573726b690b2838,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xxd2f,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-567668f5cf-cx2cw_openstack-operators(20c60056-65e4-486e-8b5e-bf7aef44b9bc): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 16 11:27:20 crc kubenswrapper[4949]: E0216 11:27:20.066603 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/nova-operator-controller-manager-567668f5cf-cx2cw" podUID="20c60056-65e4-486e-8b5e-bf7aef44b9bc" Feb 16 11:27:20 crc kubenswrapper[4949]: E0216 11:27:20.145908 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = 
copying config: context canceled" image="38.102.83.227:5001/openstack-k8s-operators/telemetry-operator:7c764327dd2ffab22c122e2f1706e47c6eeb2902" Feb 16 11:27:20 crc kubenswrapper[4949]: E0216 11:27:20.146245 4949 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.227:5001/openstack-k8s-operators/telemetry-operator:7c764327dd2ffab22c122e2f1706e47c6eeb2902" Feb 16 11:27:20 crc kubenswrapper[4949]: E0216 11:27:20.146422 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:38.102.83.227:5001/openstack-k8s-operators/telemetry-operator:7c764327dd2ffab22c122e2f1706e47c6eeb2902,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-md84w,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-64b85768bb-85t96_openstack-operators(6c8f8a0e-a378-4b98-8c9b-e5180c97e088): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 16 11:27:20 crc kubenswrapper[4949]: E0216 11:27:20.147586 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/telemetry-operator-controller-manager-64b85768bb-85t96" podUID="6c8f8a0e-a378-4b98-8c9b-e5180c97e088" Feb 16 11:27:20 crc kubenswrapper[4949]: E0216 11:27:20.485692 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.227:5001/openstack-k8s-operators/telemetry-operator:7c764327dd2ffab22c122e2f1706e47c6eeb2902\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-64b85768bb-85t96" podUID="6c8f8a0e-a378-4b98-8c9b-e5180c97e088" Feb 16 11:27:20 crc kubenswrapper[4949]: E0216 11:27:20.485945 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:fe85dd595906fac0fe1e7a42215bb306a963cf87d55e07cd2573726b690b2838\\\"\"" pod="openstack-operators/nova-operator-controller-manager-567668f5cf-cx2cw" podUID="20c60056-65e4-486e-8b5e-bf7aef44b9bc" Feb 16 11:27:21 crc kubenswrapper[4949]: E0216 11:27:21.149803 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/keystone-operator@sha256:c6ad383f55f955902b074d1ee947a2233a5fcbf40698479ae693ce056c80dcc1" Feb 16 11:27:21 crc kubenswrapper[4949]: E0216 11:27:21.150499 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/keystone-operator@sha256:c6ad383f55f955902b074d1ee947a2233a5fcbf40698479ae693ce056c80dcc1,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-txnv5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
keystone-operator-controller-manager-b4d948c87-57rt6_openstack-operators(feaf7854-ba7b-4246-827d-941656a7f10b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 16 11:27:21 crc kubenswrapper[4949]: E0216 11:27:21.152514 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/keystone-operator-controller-manager-b4d948c87-57rt6" podUID="feaf7854-ba7b-4246-827d-941656a7f10b" Feb 16 11:27:21 crc kubenswrapper[4949]: I0216 11:27:21.339266 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9clpgjd"] Feb 16 11:27:21 crc kubenswrapper[4949]: E0216 11:27:21.492937 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/keystone-operator@sha256:c6ad383f55f955902b074d1ee947a2233a5fcbf40698479ae693ce056c80dcc1\\\"\"" pod="openstack-operators/keystone-operator-controller-manager-b4d948c87-57rt6" podUID="feaf7854-ba7b-4246-827d-941656a7f10b" Feb 16 11:27:22 crc kubenswrapper[4949]: E0216 11:27:22.055217 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2" Feb 16 11:27:22 crc kubenswrapper[4949]: E0216 11:27:22.056516 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-ndgjh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-p9cl5_openstack-operators(9a87a761-d2b8-4202-98c2-391fdb512cc4): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 16 11:27:22 crc kubenswrapper[4949]: E0216 11:27:22.057803 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-p9cl5" podUID="9a87a761-d2b8-4202-98c2-391fdb512cc4" Feb 16 11:27:22 crc kubenswrapper[4949]: W0216 11:27:22.376997 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8ac656c0_2570_4fd7_acbb_96182ff1bc95.slice/crio-bfa836172daed84b249a9309fc2c9560520b3178496830b048b28f721e891979 WatchSource:0}: Error finding container bfa836172daed84b249a9309fc2c9560520b3178496830b048b28f721e891979: Status 404 returned error can't find the container with id bfa836172daed84b249a9309fc2c9560520b3178496830b048b28f721e891979 Feb 16 11:27:22 crc kubenswrapper[4949]: I0216 11:27:22.378134 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-6b65fbbb9f-7j44g"] Feb 16 11:27:22 crc kubenswrapper[4949]: I0216 11:27:22.448249 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-79d975b745-6ms2x"] Feb 16 11:27:22 crc kubenswrapper[4949]: I0216 11:27:22.516983 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-69f49c598c-r82d9" event={"ID":"7927070f-ccfa-49c3-9a88-6fdee387c97c","Type":"ContainerStarted","Data":"d8b3de05584d16a6614d76c8624369e04d4049c55e234adf6e2223034813fb6f"} Feb 16 11:27:22 crc kubenswrapper[4949]: I0216 11:27:22.532746 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-6d8bf5c495-zm28n" event={"ID":"f0769aea-7db8-4dcb-bbde-8a4b918b3fa7","Type":"ContainerStarted","Data":"9582cbc0610829f315476a777740443bb9b433be9de1c0fe542a1b1b3b370109"} Feb 16 11:27:22 crc kubenswrapper[4949]: I0216 11:27:22.534061 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-6d8bf5c495-zm28n" Feb 16 11:27:22 crc kubenswrapper[4949]: I0216 11:27:22.535376 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9clpgjd" 
event={"ID":"f583e5c3-96ec-417a-8d47-541896c301fb","Type":"ContainerStarted","Data":"4b864f762be247382da32f2a5587ee86cc76a22096aafa6f024caec374bc17e6"} Feb 16 11:27:22 crc kubenswrapper[4949]: I0216 11:27:22.536221 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-79d975b745-6ms2x" event={"ID":"4274f89e-0708-44ec-9bbb-3bb865c71d82","Type":"ContainerStarted","Data":"8e83398d8da9e9e51e45390bea4b29d9943fc7dd5bea1bbdc92d6df301256801"} Feb 16 11:27:22 crc kubenswrapper[4949]: I0216 11:27:22.537300 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5b9b8895d5-j7vgc" event={"ID":"6b381c9a-6963-419d-b96c-81ca6ea674d3","Type":"ContainerStarted","Data":"e146739c9ad2b2ef2571ad38ac6de7db88521496f01d413a3bb56fc2f710c350"} Feb 16 11:27:22 crc kubenswrapper[4949]: I0216 11:27:22.545108 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-d44cf6b75-vsknd" event={"ID":"c179d330-538e-4fc6-afeb-bc3bfdf92569","Type":"ContainerStarted","Data":"58b0a9a8f65ac8f5b4408aacafb59cf81136fe7f3b2a4dcb58efaec550f89095"} Feb 16 11:27:22 crc kubenswrapper[4949]: I0216 11:27:22.547067 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-d44cf6b75-vsknd" Feb 16 11:27:22 crc kubenswrapper[4949]: I0216 11:27:22.558742 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" event={"ID":"39ca5ab7-457c-4404-a3eb-f6acce74843b","Type":"ContainerStarted","Data":"c897db476ea0eaab84f58dfc5ce1290f1b6a8a12d03297a1f99537a46ae19905"} Feb 16 11:27:22 crc kubenswrapper[4949]: I0216 11:27:22.568124 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-6d8bf5c495-zm28n" podStartSLOduration=14.205532346 podStartE2EDuration="40.56810506s" podCreationTimestamp="2026-02-16 11:26:42 +0000 UTC" firstStartedPulling="2026-02-16 11:26:44.381288733 +0000 UTC m=+1194.010622898" lastFinishedPulling="2026-02-16 11:27:10.743861437 +0000 UTC m=+1220.373195612" observedRunningTime="2026-02-16 11:27:22.558900838 +0000 UTC m=+1232.188235003" watchObservedRunningTime="2026-02-16 11:27:22.56810506 +0000 UTC m=+1232.197439225" Feb 16 11:27:22 crc kubenswrapper[4949]: I0216 11:27:22.570384 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-6b65fbbb9f-7j44g" event={"ID":"8ac656c0-2570-4fd7-acbb-96182ff1bc95","Type":"ContainerStarted","Data":"bfa836172daed84b249a9309fc2c9560520b3178496830b048b28f721e891979"} Feb 16 11:27:22 crc kubenswrapper[4949]: I0216 11:27:22.588880 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-54f6768c69-lzkxk" event={"ID":"6c160a96-7508-45af-82b7-37ca399bb5af","Type":"ContainerStarted","Data":"e1504e875cc1d3eed7baf0104558a09435a9df419e728c635f80ca12243289ef"} Feb 16 11:27:22 crc kubenswrapper[4949]: I0216 11:27:22.590036 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-54f6768c69-lzkxk" Feb 16 11:27:22 crc kubenswrapper[4949]: I0216 11:27:22.600206 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-d44cf6b75-vsknd" podStartSLOduration=5.668540991 
podStartE2EDuration="40.600186214s" podCreationTimestamp="2026-02-16 11:26:42 +0000 UTC" firstStartedPulling="2026-02-16 11:26:46.082392792 +0000 UTC m=+1195.711726957" lastFinishedPulling="2026-02-16 11:27:21.014038015 +0000 UTC m=+1230.643372180" observedRunningTime="2026-02-16 11:27:22.59691185 +0000 UTC m=+1232.226246015" watchObservedRunningTime="2026-02-16 11:27:22.600186214 +0000 UTC m=+1232.229520379" Feb 16 11:27:22 crc kubenswrapper[4949]: I0216 11:27:22.644061 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-54f6768c69-lzkxk" podStartSLOduration=5.895704967 podStartE2EDuration="40.644041713s" podCreationTimestamp="2026-02-16 11:26:42 +0000 UTC" firstStartedPulling="2026-02-16 11:26:46.265703239 +0000 UTC m=+1195.895037404" lastFinishedPulling="2026-02-16 11:27:21.014039985 +0000 UTC m=+1230.643374150" observedRunningTime="2026-02-16 11:27:22.64359473 +0000 UTC m=+1232.272928895" watchObservedRunningTime="2026-02-16 11:27:22.644041713 +0000 UTC m=+1232.273375878" Feb 16 11:27:23 crc kubenswrapper[4949]: I0216 11:27:23.602980 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-6b65fbbb9f-7j44g" event={"ID":"8ac656c0-2570-4fd7-acbb-96182ff1bc95","Type":"ContainerStarted","Data":"cccd5a64c2fb7f87f38e01448b34e3773b0e161706c20dc339bf2eacdee76348"} Feb 16 11:27:23 crc kubenswrapper[4949]: I0216 11:27:23.603446 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-6b65fbbb9f-7j44g" Feb 16 11:27:23 crc kubenswrapper[4949]: I0216 11:27:23.605742 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-64ddbf8bb-wsh6f" event={"ID":"70f95803-8857-4a40-b133-c12031e17f77","Type":"ContainerStarted","Data":"937949677df584614d1aee0707f8f20eabaae8ebde55f5cf7e3ac9d565f70b01"} Feb 16 11:27:23 crc kubenswrapper[4949]: I0216 11:27:23.606344 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-69f49c598c-r82d9" Feb 16 11:27:23 crc kubenswrapper[4949]: I0216 11:27:23.660056 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-6b65fbbb9f-7j44g" podStartSLOduration=40.660040118 podStartE2EDuration="40.660040118s" podCreationTimestamp="2026-02-16 11:26:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:27:23.652973407 +0000 UTC m=+1233.282307582" watchObservedRunningTime="2026-02-16 11:27:23.660040118 +0000 UTC m=+1233.289374293" Feb 16 11:27:23 crc kubenswrapper[4949]: I0216 11:27:23.680772 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-5b9b8895d5-j7vgc" podStartSLOduration=5.490742939 podStartE2EDuration="41.680754229s" podCreationTimestamp="2026-02-16 11:26:42 +0000 UTC" firstStartedPulling="2026-02-16 11:26:44.83192363 +0000 UTC m=+1194.461257795" lastFinishedPulling="2026-02-16 11:27:21.02193492 +0000 UTC m=+1230.651269085" observedRunningTime="2026-02-16 11:27:23.67589359 +0000 UTC m=+1233.305227755" watchObservedRunningTime="2026-02-16 11:27:23.680754229 +0000 UTC m=+1233.310088394" Feb 16 11:27:23 crc kubenswrapper[4949]: I0216 11:27:23.702158 4949 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-64ddbf8bb-wsh6f" podStartSLOduration=5.530134771 podStartE2EDuration="41.702141648s" podCreationTimestamp="2026-02-16 11:26:42 +0000 UTC" firstStartedPulling="2026-02-16 11:26:44.842038158 +0000 UTC m=+1194.471372323" lastFinishedPulling="2026-02-16 11:27:21.014044995 +0000 UTC m=+1230.643379200" observedRunningTime="2026-02-16 11:27:23.701221342 +0000 UTC m=+1233.330555497" watchObservedRunningTime="2026-02-16 11:27:23.702141648 +0000 UTC m=+1233.331475813" Feb 16 11:27:23 crc kubenswrapper[4949]: I0216 11:27:23.718067 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-69f49c598c-r82d9" podStartSLOduration=6.072252301 podStartE2EDuration="41.718047601s" podCreationTimestamp="2026-02-16 11:26:42 +0000 UTC" firstStartedPulling="2026-02-16 11:26:45.369589193 +0000 UTC m=+1194.998923358" lastFinishedPulling="2026-02-16 11:27:21.015384493 +0000 UTC m=+1230.644718658" observedRunningTime="2026-02-16 11:27:23.717159726 +0000 UTC m=+1233.346493891" watchObservedRunningTime="2026-02-16 11:27:23.718047601 +0000 UTC m=+1233.347381766" Feb 16 11:27:24 crc kubenswrapper[4949]: I0216 11:27:24.615787 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-64ddbf8bb-wsh6f" Feb 16 11:27:25 crc kubenswrapper[4949]: I0216 11:27:25.629771 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-5db88f68c-kwgzs" event={"ID":"06428bd6-f2fd-44ab-b71a-fe48ec54189d","Type":"ContainerStarted","Data":"45d2166e37a452e7401160c38e288ac86bc03bc729663ce71d1af0046359207a"} Feb 16 11:27:25 crc kubenswrapper[4949]: I0216 11:27:25.631106 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-5db88f68c-kwgzs" Feb 16 11:27:25 crc kubenswrapper[4949]: I0216 11:27:25.665080 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-5db88f68c-kwgzs" podStartSLOduration=5.249785904 podStartE2EDuration="42.665050809s" podCreationTimestamp="2026-02-16 11:26:43 +0000 UTC" firstStartedPulling="2026-02-16 11:26:46.263351823 +0000 UTC m=+1195.892685988" lastFinishedPulling="2026-02-16 11:27:23.678616728 +0000 UTC m=+1233.307950893" observedRunningTime="2026-02-16 11:27:25.65173084 +0000 UTC m=+1235.281064995" watchObservedRunningTime="2026-02-16 11:27:25.665050809 +0000 UTC m=+1235.294384974" Feb 16 11:27:26 crc kubenswrapper[4949]: I0216 11:27:26.721456 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-68f46476f-dk4s8" event={"ID":"f6c8332e-9a8f-44dc-ac2b-8180c68d8f0f","Type":"ContainerStarted","Data":"b327d279ef6cd1e93445ef2d68bd78486b621f6b1e8d999a1deed83e7329069b"} Feb 16 11:27:26 crc kubenswrapper[4949]: I0216 11:27:26.722520 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-68f46476f-dk4s8" Feb 16 11:27:26 crc kubenswrapper[4949]: I0216 11:27:26.734494 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-8497b45c89-dsfbf" 
event={"ID":"6f3a40af-b9a5-4709-8173-fb62a0d053e8","Type":"ContainerStarted","Data":"5a060aa8b0df22bf72fed551960c5149a102c72cb52f74605791885c5c76c486"} Feb 16 11:27:26 crc kubenswrapper[4949]: I0216 11:27:26.735392 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-8497b45c89-dsfbf" Feb 16 11:27:26 crc kubenswrapper[4949]: I0216 11:27:26.774007 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-868647ff47-blpwl" event={"ID":"3608d840-be2f-478e-8252-e41f5480853a","Type":"ContainerStarted","Data":"564b68966acf0e6c387ea353f89c6caac72c25bdc6eb0d9583fd3c1579ba4915"} Feb 16 11:27:26 crc kubenswrapper[4949]: I0216 11:27:26.775130 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-868647ff47-blpwl" Feb 16 11:27:26 crc kubenswrapper[4949]: I0216 11:27:26.793606 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-5d946d989d-7ndss" event={"ID":"14ac83e9-3142-4e62-b2a5-789822ea3013","Type":"ContainerStarted","Data":"34f2f2a657bfe6379110b63a9f3cb5caa8514009c58493d8b443d20ac9f8e600"} Feb 16 11:27:26 crc kubenswrapper[4949]: I0216 11:27:26.794730 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-5d946d989d-7ndss" Feb 16 11:27:26 crc kubenswrapper[4949]: I0216 11:27:26.804818 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-79d975b745-6ms2x" Feb 16 11:27:26 crc kubenswrapper[4949]: I0216 11:27:26.810690 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-77987464f4-w59wt" event={"ID":"482d88cb-1680-4276-8373-bf565231eadf","Type":"ContainerStarted","Data":"2d22d1b022d1a0ac13d9e757dfb10724b742029e50a87b50752b331ee34d4642"} Feb 16 11:27:26 crc kubenswrapper[4949]: I0216 11:27:26.811351 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-77987464f4-w59wt" Feb 16 11:27:26 crc kubenswrapper[4949]: I0216 11:27:26.834120 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-68f46476f-dk4s8" podStartSLOduration=6.765321413 podStartE2EDuration="44.834099715s" podCreationTimestamp="2026-02-16 11:26:42 +0000 UTC" firstStartedPulling="2026-02-16 11:26:46.190711095 +0000 UTC m=+1195.820045260" lastFinishedPulling="2026-02-16 11:27:24.259489397 +0000 UTC m=+1233.888823562" observedRunningTime="2026-02-16 11:27:26.78302767 +0000 UTC m=+1236.412361825" watchObservedRunningTime="2026-02-16 11:27:26.834099715 +0000 UTC m=+1236.463433880" Feb 16 11:27:26 crc kubenswrapper[4949]: I0216 11:27:26.837880 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-868647ff47-blpwl" podStartSLOduration=5.7013552149999995 podStartE2EDuration="44.837864932s" podCreationTimestamp="2026-02-16 11:26:42 +0000 UTC" firstStartedPulling="2026-02-16 11:26:44.542120361 +0000 UTC m=+1194.171454526" lastFinishedPulling="2026-02-16 11:27:23.678630078 +0000 UTC m=+1233.307964243" observedRunningTime="2026-02-16 11:27:26.82199812 +0000 UTC m=+1236.451332295" watchObservedRunningTime="2026-02-16 11:27:26.837864932 +0000 
UTC m=+1236.467199097" Feb 16 11:27:26 crc kubenswrapper[4949]: I0216 11:27:26.850061 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9clpgjd" event={"ID":"f583e5c3-96ec-417a-8d47-541896c301fb","Type":"ContainerStarted","Data":"7a77371b1424de594ab1f3ae2720ef6648448e728b81625b875fa1934a8315ed"} Feb 16 11:27:26 crc kubenswrapper[4949]: I0216 11:27:26.850115 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9clpgjd" Feb 16 11:27:26 crc kubenswrapper[4949]: I0216 11:27:26.885513 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-8497b45c89-dsfbf" podStartSLOduration=6.861031299 podStartE2EDuration="44.885492229s" podCreationTimestamp="2026-02-16 11:26:42 +0000 UTC" firstStartedPulling="2026-02-16 11:26:46.24182865 +0000 UTC m=+1195.871162805" lastFinishedPulling="2026-02-16 11:27:24.26628957 +0000 UTC m=+1233.895623735" observedRunningTime="2026-02-16 11:27:26.876477372 +0000 UTC m=+1236.505811537" watchObservedRunningTime="2026-02-16 11:27:26.885492229 +0000 UTC m=+1236.514826404" Feb 16 11:27:26 crc kubenswrapper[4949]: I0216 11:27:26.966899 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9clpgjd" podStartSLOduration=40.959719477 podStartE2EDuration="44.966876868s" podCreationTimestamp="2026-02-16 11:26:42 +0000 UTC" firstStartedPulling="2026-02-16 11:27:21.844161295 +0000 UTC m=+1231.473495460" lastFinishedPulling="2026-02-16 11:27:25.851318666 +0000 UTC m=+1235.480652851" observedRunningTime="2026-02-16 11:27:26.933896068 +0000 UTC m=+1236.563230233" watchObservedRunningTime="2026-02-16 11:27:26.966876868 +0000 UTC m=+1236.596211033" Feb 16 11:27:27 crc kubenswrapper[4949]: I0216 11:27:27.021892 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-79d975b745-6ms2x" podStartSLOduration=42.039062276 podStartE2EDuration="45.021868394s" podCreationTimestamp="2026-02-16 11:26:42 +0000 UTC" firstStartedPulling="2026-02-16 11:27:23.114658671 +0000 UTC m=+1232.743992836" lastFinishedPulling="2026-02-16 11:27:26.097464789 +0000 UTC m=+1235.726798954" observedRunningTime="2026-02-16 11:27:27.017413457 +0000 UTC m=+1236.646747622" watchObservedRunningTime="2026-02-16 11:27:27.021868394 +0000 UTC m=+1236.651202559" Feb 16 11:27:27 crc kubenswrapper[4949]: I0216 11:27:27.022107 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-5d946d989d-7ndss" podStartSLOduration=5.132248546 podStartE2EDuration="45.022101631s" podCreationTimestamp="2026-02-16 11:26:42 +0000 UTC" firstStartedPulling="2026-02-16 11:26:44.37274103 +0000 UTC m=+1194.002075195" lastFinishedPulling="2026-02-16 11:27:24.262594115 +0000 UTC m=+1233.891928280" observedRunningTime="2026-02-16 11:27:26.995530944 +0000 UTC m=+1236.624865109" watchObservedRunningTime="2026-02-16 11:27:27.022101631 +0000 UTC m=+1236.651435796" Feb 16 11:27:27 crc kubenswrapper[4949]: I0216 11:27:27.067397 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-77987464f4-w59wt" podStartSLOduration=5.808606339 podStartE2EDuration="45.067368851s" 
podCreationTimestamp="2026-02-16 11:26:42 +0000 UTC" firstStartedPulling="2026-02-16 11:26:45.00161309 +0000 UTC m=+1194.630947265" lastFinishedPulling="2026-02-16 11:27:24.260375612 +0000 UTC m=+1233.889709777" observedRunningTime="2026-02-16 11:27:27.066524157 +0000 UTC m=+1236.695858322" watchObservedRunningTime="2026-02-16 11:27:27.067368851 +0000 UTC m=+1236.696703006" Feb 16 11:27:27 crc kubenswrapper[4949]: I0216 11:27:27.859552 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-554564d7fc-d928p" event={"ID":"b81e3e22-4860-4765-9683-675c1fbbefef","Type":"ContainerStarted","Data":"5a7ecbc18e1b0b8b5a7425eeeee13cb8286d79ff3152cbcafa095761633ae94d"} Feb 16 11:27:27 crc kubenswrapper[4949]: I0216 11:27:27.860248 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-554564d7fc-d928p" Feb 16 11:27:27 crc kubenswrapper[4949]: I0216 11:27:27.862065 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-79d975b745-6ms2x" event={"ID":"4274f89e-0708-44ec-9bbb-3bb865c71d82","Type":"ContainerStarted","Data":"dab13d80d9c48c2d7e69c7216ac21eb4301b5ffbe0999f6d13893b4e56b7391b"} Feb 16 11:27:27 crc kubenswrapper[4949]: I0216 11:27:27.863662 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-69f8888797-4bjjf" event={"ID":"d1e2149a-cad2-4e22-822c-d5afb8294a25","Type":"ContainerStarted","Data":"f681cb78b1e2955367b9aa316d01fd840296bbab7b0fbc414aa60150445da2ac"} Feb 16 11:27:27 crc kubenswrapper[4949]: I0216 11:27:27.863885 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-69f8888797-4bjjf" Feb 16 11:27:27 crc kubenswrapper[4949]: I0216 11:27:27.865415 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-7866795846-f4l9p" event={"ID":"10670483-76f3-4774-9aa4-a0c21ff5799e","Type":"ContainerStarted","Data":"f4c394ea10e69d32f29df3732268ef7d47bd5f8dccede7ec35c21c4adbf8b15c"} Feb 16 11:27:27 crc kubenswrapper[4949]: I0216 11:27:27.865650 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-7866795846-f4l9p" Feb 16 11:27:27 crc kubenswrapper[4949]: I0216 11:27:27.867840 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-6994f66f48-jv8z8" event={"ID":"ad558c16-93d3-4e12-8feb-56516d331bbe","Type":"ContainerStarted","Data":"c8a5922518d7174253dd692cbc9e525f1f21adf9d88c3d2a340127e4d14a73e5"} Feb 16 11:27:27 crc kubenswrapper[4949]: I0216 11:27:27.895950 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-554564d7fc-d928p" podStartSLOduration=5.313663703 podStartE2EDuration="45.895932676s" podCreationTimestamp="2026-02-16 11:26:42 +0000 UTC" firstStartedPulling="2026-02-16 11:26:45.217602027 +0000 UTC m=+1194.846936192" lastFinishedPulling="2026-02-16 11:27:25.799871 +0000 UTC m=+1235.429205165" observedRunningTime="2026-02-16 11:27:27.888942637 +0000 UTC m=+1237.518276802" watchObservedRunningTime="2026-02-16 11:27:27.895932676 +0000 UTC m=+1237.525266841" Feb 16 11:27:27 crc kubenswrapper[4949]: I0216 11:27:27.915874 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack-operators/octavia-operator-controller-manager-69f8888797-4bjjf" podStartSLOduration=7.913900785 podStartE2EDuration="45.915852874s" podCreationTimestamp="2026-02-16 11:26:42 +0000 UTC" firstStartedPulling="2026-02-16 11:26:46.264271439 +0000 UTC m=+1195.893605604" lastFinishedPulling="2026-02-16 11:27:24.266223528 +0000 UTC m=+1233.895557693" observedRunningTime="2026-02-16 11:27:27.910375908 +0000 UTC m=+1237.539710063" watchObservedRunningTime="2026-02-16 11:27:27.915852874 +0000 UTC m=+1237.545187039" Feb 16 11:27:27 crc kubenswrapper[4949]: I0216 11:27:27.934992 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-7866795846-f4l9p" podStartSLOduration=7.037939872 podStartE2EDuration="45.934975738s" podCreationTimestamp="2026-02-16 11:26:42 +0000 UTC" firstStartedPulling="2026-02-16 11:26:46.151059876 +0000 UTC m=+1195.780394041" lastFinishedPulling="2026-02-16 11:27:25.048095742 +0000 UTC m=+1234.677429907" observedRunningTime="2026-02-16 11:27:27.932794156 +0000 UTC m=+1237.562128321" watchObservedRunningTime="2026-02-16 11:27:27.934975738 +0000 UTC m=+1237.564309893" Feb 16 11:27:27 crc kubenswrapper[4949]: I0216 11:27:27.961759 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-6994f66f48-jv8z8" podStartSLOduration=5.480898614 podStartE2EDuration="45.961734741s" podCreationTimestamp="2026-02-16 11:26:42 +0000 UTC" firstStartedPulling="2026-02-16 11:26:45.319474186 +0000 UTC m=+1194.948808351" lastFinishedPulling="2026-02-16 11:27:25.800310313 +0000 UTC m=+1235.429644478" observedRunningTime="2026-02-16 11:27:27.951210971 +0000 UTC m=+1237.580545166" watchObservedRunningTime="2026-02-16 11:27:27.961734741 +0000 UTC m=+1237.591068916" Feb 16 11:27:32 crc kubenswrapper[4949]: I0216 11:27:32.856741 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-5d946d989d-7ndss" Feb 16 11:27:32 crc kubenswrapper[4949]: I0216 11:27:32.940843 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-6d8bf5c495-zm28n" Feb 16 11:27:33 crc kubenswrapper[4949]: I0216 11:27:33.102983 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-868647ff47-blpwl" Feb 16 11:27:33 crc kubenswrapper[4949]: I0216 11:27:33.266797 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-77987464f4-w59wt" Feb 16 11:27:33 crc kubenswrapper[4949]: I0216 11:27:33.304142 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-69f49c598c-r82d9" Feb 16 11:27:33 crc kubenswrapper[4949]: I0216 11:27:33.322500 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-5b9b8895d5-j7vgc" Feb 16 11:27:33 crc kubenswrapper[4949]: I0216 11:27:33.346161 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-5b9b8895d5-j7vgc" Feb 16 11:27:33 crc kubenswrapper[4949]: I0216 11:27:33.347743 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack-operators/neutron-operator-controller-manager-64ddbf8bb-wsh6f" Feb 16 11:27:33 crc kubenswrapper[4949]: I0216 11:27:33.496423 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-554564d7fc-d928p" Feb 16 11:27:33 crc kubenswrapper[4949]: I0216 11:27:33.553657 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-54f6768c69-lzkxk" Feb 16 11:27:33 crc kubenswrapper[4949]: I0216 11:27:33.589156 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-6994f66f48-jv8z8" Feb 16 11:27:33 crc kubenswrapper[4949]: I0216 11:27:33.591225 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-6994f66f48-jv8z8" Feb 16 11:27:33 crc kubenswrapper[4949]: I0216 11:27:33.744452 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-69f8888797-4bjjf" Feb 16 11:27:33 crc kubenswrapper[4949]: I0216 11:27:33.771719 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-d44cf6b75-vsknd" Feb 16 11:27:33 crc kubenswrapper[4949]: I0216 11:27:33.874078 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-68f46476f-dk4s8" Feb 16 11:27:33 crc kubenswrapper[4949]: I0216 11:27:33.928641 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-b4d948c87-57rt6" event={"ID":"feaf7854-ba7b-4246-827d-941656a7f10b","Type":"ContainerStarted","Data":"ef593208415ee29a073c2d74402bc791d0622f71500c3f306c12c13a2538313d"} Feb 16 11:27:33 crc kubenswrapper[4949]: I0216 11:27:33.929331 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-b4d948c87-57rt6" Feb 16 11:27:33 crc kubenswrapper[4949]: I0216 11:27:33.931037 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-567668f5cf-cx2cw" event={"ID":"20c60056-65e4-486e-8b5e-bf7aef44b9bc","Type":"ContainerStarted","Data":"c175ed0ef372792d8f20c904a2f22f499c5b19e19100d0d813099ea6fa42f443"} Feb 16 11:27:33 crc kubenswrapper[4949]: I0216 11:27:33.931434 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-567668f5cf-cx2cw" Feb 16 11:27:33 crc kubenswrapper[4949]: I0216 11:27:33.963531 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-b4d948c87-57rt6" podStartSLOduration=4.053922245 podStartE2EDuration="51.963509917s" podCreationTimestamp="2026-02-16 11:26:42 +0000 UTC" firstStartedPulling="2026-02-16 11:26:44.847927146 +0000 UTC m=+1194.477261321" lastFinishedPulling="2026-02-16 11:27:32.757514828 +0000 UTC m=+1242.386848993" observedRunningTime="2026-02-16 11:27:33.946806411 +0000 UTC m=+1243.576140576" watchObservedRunningTime="2026-02-16 11:27:33.963509917 +0000 UTC m=+1243.592844082" Feb 16 11:27:33 crc kubenswrapper[4949]: I0216 11:27:33.978919 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack-operators/nova-operator-controller-manager-567668f5cf-cx2cw" podStartSLOduration=5.370156479 podStartE2EDuration="51.978888265s" podCreationTimestamp="2026-02-16 11:26:42 +0000 UTC" firstStartedPulling="2026-02-16 11:26:46.150582303 +0000 UTC m=+1195.779916468" lastFinishedPulling="2026-02-16 11:27:32.759314089 +0000 UTC m=+1242.388648254" observedRunningTime="2026-02-16 11:27:33.967796039 +0000 UTC m=+1243.597130244" watchObservedRunningTime="2026-02-16 11:27:33.978888265 +0000 UTC m=+1243.608222430" Feb 16 11:27:34 crc kubenswrapper[4949]: I0216 11:27:34.154044 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-8497b45c89-dsfbf" Feb 16 11:27:34 crc kubenswrapper[4949]: I0216 11:27:34.217955 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-7866795846-f4l9p" Feb 16 11:27:34 crc kubenswrapper[4949]: E0216 11:27:34.237152 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-p9cl5" podUID="9a87a761-d2b8-4202-98c2-391fdb512cc4" Feb 16 11:27:34 crc kubenswrapper[4949]: I0216 11:27:34.563634 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-5db88f68c-kwgzs" Feb 16 11:27:35 crc kubenswrapper[4949]: I0216 11:27:35.185045 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-79d975b745-6ms2x" Feb 16 11:27:36 crc kubenswrapper[4949]: I0216 11:27:36.435797 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-6b65fbbb9f-7j44g" Feb 16 11:27:36 crc kubenswrapper[4949]: I0216 11:27:36.957884 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-64b85768bb-85t96" event={"ID":"6c8f8a0e-a378-4b98-8c9b-e5180c97e088","Type":"ContainerStarted","Data":"2a6e8b57a0d99e0035b97be35816a638511bece787a9c96e2a71faa748ef1582"} Feb 16 11:27:36 crc kubenswrapper[4949]: I0216 11:27:36.958452 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-64b85768bb-85t96" Feb 16 11:27:36 crc kubenswrapper[4949]: I0216 11:27:36.978604 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-64b85768bb-85t96" podStartSLOduration=5.356147846 podStartE2EDuration="54.978582003s" podCreationTimestamp="2026-02-16 11:26:42 +0000 UTC" firstStartedPulling="2026-02-16 11:26:46.25657973 +0000 UTC m=+1195.885913895" lastFinishedPulling="2026-02-16 11:27:35.879013887 +0000 UTC m=+1245.508348052" observedRunningTime="2026-02-16 11:27:36.976749021 +0000 UTC m=+1246.606083206" watchObservedRunningTime="2026-02-16 11:27:36.978582003 +0000 UTC m=+1246.607916168" Feb 16 11:27:39 crc kubenswrapper[4949]: I0216 11:27:39.449040 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9clpgjd" Feb 16 11:27:43 crc 
kubenswrapper[4949]: I0216 11:27:43.397501 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-b4d948c87-57rt6" Feb 16 11:27:43 crc kubenswrapper[4949]: I0216 11:27:43.643103 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-567668f5cf-cx2cw" Feb 16 11:27:44 crc kubenswrapper[4949]: I0216 11:27:44.179891 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-64b85768bb-85t96" Feb 16 11:27:50 crc kubenswrapper[4949]: I0216 11:27:50.101291 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-p9cl5" event={"ID":"9a87a761-d2b8-4202-98c2-391fdb512cc4","Type":"ContainerStarted","Data":"1acfbc8b68b4dab62a02fa4a9be27268394d234054bb3a6b545406a8da32da12"} Feb 16 11:27:50 crc kubenswrapper[4949]: I0216 11:27:50.125953 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-p9cl5" podStartSLOduration=3.613879708 podStartE2EDuration="1m7.125936611s" podCreationTimestamp="2026-02-16 11:26:43 +0000 UTC" firstStartedPulling="2026-02-16 11:26:46.291563766 +0000 UTC m=+1195.920897931" lastFinishedPulling="2026-02-16 11:27:49.803620669 +0000 UTC m=+1259.432954834" observedRunningTime="2026-02-16 11:27:50.118648324 +0000 UTC m=+1259.747982479" watchObservedRunningTime="2026-02-16 11:27:50.125936611 +0000 UTC m=+1259.755270776" Feb 16 11:28:06 crc kubenswrapper[4949]: I0216 11:28:06.340804 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-fsvfh"] Feb 16 11:28:06 crc kubenswrapper[4949]: I0216 11:28:06.344658 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-fsvfh" Feb 16 11:28:06 crc kubenswrapper[4949]: I0216 11:28:06.349987 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Feb 16 11:28:06 crc kubenswrapper[4949]: I0216 11:28:06.350051 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Feb 16 11:28:06 crc kubenswrapper[4949]: I0216 11:28:06.350289 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Feb 16 11:28:06 crc kubenswrapper[4949]: I0216 11:28:06.354146 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-fsvfh"] Feb 16 11:28:06 crc kubenswrapper[4949]: I0216 11:28:06.355257 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-9pjv5" Feb 16 11:28:06 crc kubenswrapper[4949]: I0216 11:28:06.405348 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-2j9gn"] Feb 16 11:28:06 crc kubenswrapper[4949]: I0216 11:28:06.410715 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-2j9gn" Feb 16 11:28:06 crc kubenswrapper[4949]: I0216 11:28:06.418978 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-2j9gn"] Feb 16 11:28:06 crc kubenswrapper[4949]: I0216 11:28:06.420338 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Feb 16 11:28:06 crc kubenswrapper[4949]: I0216 11:28:06.525552 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d7f15c5-4079-4ffd-8f92-41c2c2807a7c-config\") pod \"dnsmasq-dns-78dd6ddcc-2j9gn\" (UID: \"4d7f15c5-4079-4ffd-8f92-41c2c2807a7c\") " pod="openstack/dnsmasq-dns-78dd6ddcc-2j9gn" Feb 16 11:28:06 crc kubenswrapper[4949]: I0216 11:28:06.525626 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/138cdd70-b3af-45cd-88a3-c80fcd10e094-config\") pod \"dnsmasq-dns-675f4bcbfc-fsvfh\" (UID: \"138cdd70-b3af-45cd-88a3-c80fcd10e094\") " pod="openstack/dnsmasq-dns-675f4bcbfc-fsvfh" Feb 16 11:28:06 crc kubenswrapper[4949]: I0216 11:28:06.525662 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7nz6l\" (UniqueName: \"kubernetes.io/projected/4d7f15c5-4079-4ffd-8f92-41c2c2807a7c-kube-api-access-7nz6l\") pod \"dnsmasq-dns-78dd6ddcc-2j9gn\" (UID: \"4d7f15c5-4079-4ffd-8f92-41c2c2807a7c\") " pod="openstack/dnsmasq-dns-78dd6ddcc-2j9gn" Feb 16 11:28:06 crc kubenswrapper[4949]: I0216 11:28:06.525804 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4d7f15c5-4079-4ffd-8f92-41c2c2807a7c-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-2j9gn\" (UID: \"4d7f15c5-4079-4ffd-8f92-41c2c2807a7c\") " pod="openstack/dnsmasq-dns-78dd6ddcc-2j9gn" Feb 16 11:28:06 crc kubenswrapper[4949]: I0216 11:28:06.526109 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s4npw\" (UniqueName: \"kubernetes.io/projected/138cdd70-b3af-45cd-88a3-c80fcd10e094-kube-api-access-s4npw\") pod \"dnsmasq-dns-675f4bcbfc-fsvfh\" (UID: \"138cdd70-b3af-45cd-88a3-c80fcd10e094\") " pod="openstack/dnsmasq-dns-675f4bcbfc-fsvfh" Feb 16 11:28:06 crc kubenswrapper[4949]: I0216 11:28:06.628278 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s4npw\" (UniqueName: \"kubernetes.io/projected/138cdd70-b3af-45cd-88a3-c80fcd10e094-kube-api-access-s4npw\") pod \"dnsmasq-dns-675f4bcbfc-fsvfh\" (UID: \"138cdd70-b3af-45cd-88a3-c80fcd10e094\") " pod="openstack/dnsmasq-dns-675f4bcbfc-fsvfh" Feb 16 11:28:06 crc kubenswrapper[4949]: I0216 11:28:06.628429 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d7f15c5-4079-4ffd-8f92-41c2c2807a7c-config\") pod \"dnsmasq-dns-78dd6ddcc-2j9gn\" (UID: \"4d7f15c5-4079-4ffd-8f92-41c2c2807a7c\") " pod="openstack/dnsmasq-dns-78dd6ddcc-2j9gn" Feb 16 11:28:06 crc kubenswrapper[4949]: I0216 11:28:06.628468 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/138cdd70-b3af-45cd-88a3-c80fcd10e094-config\") pod \"dnsmasq-dns-675f4bcbfc-fsvfh\" (UID: \"138cdd70-b3af-45cd-88a3-c80fcd10e094\") " pod="openstack/dnsmasq-dns-675f4bcbfc-fsvfh" Feb 
16 11:28:06 crc kubenswrapper[4949]: I0216 11:28:06.628495 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7nz6l\" (UniqueName: \"kubernetes.io/projected/4d7f15c5-4079-4ffd-8f92-41c2c2807a7c-kube-api-access-7nz6l\") pod \"dnsmasq-dns-78dd6ddcc-2j9gn\" (UID: \"4d7f15c5-4079-4ffd-8f92-41c2c2807a7c\") " pod="openstack/dnsmasq-dns-78dd6ddcc-2j9gn" Feb 16 11:28:06 crc kubenswrapper[4949]: I0216 11:28:06.628535 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4d7f15c5-4079-4ffd-8f92-41c2c2807a7c-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-2j9gn\" (UID: \"4d7f15c5-4079-4ffd-8f92-41c2c2807a7c\") " pod="openstack/dnsmasq-dns-78dd6ddcc-2j9gn" Feb 16 11:28:06 crc kubenswrapper[4949]: I0216 11:28:06.629728 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4d7f15c5-4079-4ffd-8f92-41c2c2807a7c-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-2j9gn\" (UID: \"4d7f15c5-4079-4ffd-8f92-41c2c2807a7c\") " pod="openstack/dnsmasq-dns-78dd6ddcc-2j9gn" Feb 16 11:28:06 crc kubenswrapper[4949]: I0216 11:28:06.630817 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d7f15c5-4079-4ffd-8f92-41c2c2807a7c-config\") pod \"dnsmasq-dns-78dd6ddcc-2j9gn\" (UID: \"4d7f15c5-4079-4ffd-8f92-41c2c2807a7c\") " pod="openstack/dnsmasq-dns-78dd6ddcc-2j9gn" Feb 16 11:28:06 crc kubenswrapper[4949]: I0216 11:28:06.631480 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/138cdd70-b3af-45cd-88a3-c80fcd10e094-config\") pod \"dnsmasq-dns-675f4bcbfc-fsvfh\" (UID: \"138cdd70-b3af-45cd-88a3-c80fcd10e094\") " pod="openstack/dnsmasq-dns-675f4bcbfc-fsvfh" Feb 16 11:28:06 crc kubenswrapper[4949]: I0216 11:28:06.654365 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7nz6l\" (UniqueName: \"kubernetes.io/projected/4d7f15c5-4079-4ffd-8f92-41c2c2807a7c-kube-api-access-7nz6l\") pod \"dnsmasq-dns-78dd6ddcc-2j9gn\" (UID: \"4d7f15c5-4079-4ffd-8f92-41c2c2807a7c\") " pod="openstack/dnsmasq-dns-78dd6ddcc-2j9gn" Feb 16 11:28:06 crc kubenswrapper[4949]: I0216 11:28:06.657545 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s4npw\" (UniqueName: \"kubernetes.io/projected/138cdd70-b3af-45cd-88a3-c80fcd10e094-kube-api-access-s4npw\") pod \"dnsmasq-dns-675f4bcbfc-fsvfh\" (UID: \"138cdd70-b3af-45cd-88a3-c80fcd10e094\") " pod="openstack/dnsmasq-dns-675f4bcbfc-fsvfh" Feb 16 11:28:06 crc kubenswrapper[4949]: I0216 11:28:06.683259 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-fsvfh" Feb 16 11:28:06 crc kubenswrapper[4949]: I0216 11:28:06.729855 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-2j9gn" Feb 16 11:28:07 crc kubenswrapper[4949]: I0216 11:28:07.299466 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-fsvfh"] Feb 16 11:28:07 crc kubenswrapper[4949]: I0216 11:28:07.391878 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-2j9gn"] Feb 16 11:28:07 crc kubenswrapper[4949]: W0216 11:28:07.396752 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4d7f15c5_4079_4ffd_8f92_41c2c2807a7c.slice/crio-e2732a139ab145772c3e38e0040fbb5ab3c6d080c53d7e0fa2e794535fc3607e WatchSource:0}: Error finding container e2732a139ab145772c3e38e0040fbb5ab3c6d080c53d7e0fa2e794535fc3607e: Status 404 returned error can't find the container with id e2732a139ab145772c3e38e0040fbb5ab3c6d080c53d7e0fa2e794535fc3607e Feb 16 11:28:08 crc kubenswrapper[4949]: I0216 11:28:08.269421 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-fsvfh" event={"ID":"138cdd70-b3af-45cd-88a3-c80fcd10e094","Type":"ContainerStarted","Data":"076d48114a77751b984e9703f8547c6a8c334cae7317399283a326d2b8c4f576"} Feb 16 11:28:08 crc kubenswrapper[4949]: I0216 11:28:08.271515 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-2j9gn" event={"ID":"4d7f15c5-4079-4ffd-8f92-41c2c2807a7c","Type":"ContainerStarted","Data":"e2732a139ab145772c3e38e0040fbb5ab3c6d080c53d7e0fa2e794535fc3607e"} Feb 16 11:28:09 crc kubenswrapper[4949]: I0216 11:28:09.024561 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-fsvfh"] Feb 16 11:28:09 crc kubenswrapper[4949]: I0216 11:28:09.065558 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-qvg6k"] Feb 16 11:28:09 crc kubenswrapper[4949]: I0216 11:28:09.067940 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-qvg6k" Feb 16 11:28:09 crc kubenswrapper[4949]: I0216 11:28:09.097804 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-qvg6k"] Feb 16 11:28:09 crc kubenswrapper[4949]: I0216 11:28:09.196649 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z2gj9\" (UniqueName: \"kubernetes.io/projected/2877518f-206f-4886-88c6-22754e8c2556-kube-api-access-z2gj9\") pod \"dnsmasq-dns-666b6646f7-qvg6k\" (UID: \"2877518f-206f-4886-88c6-22754e8c2556\") " pod="openstack/dnsmasq-dns-666b6646f7-qvg6k" Feb 16 11:28:09 crc kubenswrapper[4949]: I0216 11:28:09.196783 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2877518f-206f-4886-88c6-22754e8c2556-dns-svc\") pod \"dnsmasq-dns-666b6646f7-qvg6k\" (UID: \"2877518f-206f-4886-88c6-22754e8c2556\") " pod="openstack/dnsmasq-dns-666b6646f7-qvg6k" Feb 16 11:28:09 crc kubenswrapper[4949]: I0216 11:28:09.196848 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2877518f-206f-4886-88c6-22754e8c2556-config\") pod \"dnsmasq-dns-666b6646f7-qvg6k\" (UID: \"2877518f-206f-4886-88c6-22754e8c2556\") " pod="openstack/dnsmasq-dns-666b6646f7-qvg6k" Feb 16 11:28:09 crc kubenswrapper[4949]: I0216 11:28:09.303630 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z2gj9\" (UniqueName: \"kubernetes.io/projected/2877518f-206f-4886-88c6-22754e8c2556-kube-api-access-z2gj9\") pod \"dnsmasq-dns-666b6646f7-qvg6k\" (UID: \"2877518f-206f-4886-88c6-22754e8c2556\") " pod="openstack/dnsmasq-dns-666b6646f7-qvg6k" Feb 16 11:28:09 crc kubenswrapper[4949]: I0216 11:28:09.303767 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2877518f-206f-4886-88c6-22754e8c2556-dns-svc\") pod \"dnsmasq-dns-666b6646f7-qvg6k\" (UID: \"2877518f-206f-4886-88c6-22754e8c2556\") " pod="openstack/dnsmasq-dns-666b6646f7-qvg6k" Feb 16 11:28:09 crc kubenswrapper[4949]: I0216 11:28:09.303805 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2877518f-206f-4886-88c6-22754e8c2556-config\") pod \"dnsmasq-dns-666b6646f7-qvg6k\" (UID: \"2877518f-206f-4886-88c6-22754e8c2556\") " pod="openstack/dnsmasq-dns-666b6646f7-qvg6k" Feb 16 11:28:09 crc kubenswrapper[4949]: I0216 11:28:09.304984 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2877518f-206f-4886-88c6-22754e8c2556-dns-svc\") pod \"dnsmasq-dns-666b6646f7-qvg6k\" (UID: \"2877518f-206f-4886-88c6-22754e8c2556\") " pod="openstack/dnsmasq-dns-666b6646f7-qvg6k" Feb 16 11:28:09 crc kubenswrapper[4949]: I0216 11:28:09.305000 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2877518f-206f-4886-88c6-22754e8c2556-config\") pod \"dnsmasq-dns-666b6646f7-qvg6k\" (UID: \"2877518f-206f-4886-88c6-22754e8c2556\") " pod="openstack/dnsmasq-dns-666b6646f7-qvg6k" Feb 16 11:28:09 crc kubenswrapper[4949]: I0216 11:28:09.343082 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z2gj9\" (UniqueName: 
\"kubernetes.io/projected/2877518f-206f-4886-88c6-22754e8c2556-kube-api-access-z2gj9\") pod \"dnsmasq-dns-666b6646f7-qvg6k\" (UID: \"2877518f-206f-4886-88c6-22754e8c2556\") " pod="openstack/dnsmasq-dns-666b6646f7-qvg6k" Feb 16 11:28:09 crc kubenswrapper[4949]: I0216 11:28:09.405707 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-qvg6k" Feb 16 11:28:09 crc kubenswrapper[4949]: I0216 11:28:09.593714 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-2j9gn"] Feb 16 11:28:09 crc kubenswrapper[4949]: I0216 11:28:09.636902 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-wkwb6"] Feb 16 11:28:09 crc kubenswrapper[4949]: I0216 11:28:09.640742 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-wkwb6" Feb 16 11:28:09 crc kubenswrapper[4949]: I0216 11:28:09.678347 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-wkwb6"] Feb 16 11:28:09 crc kubenswrapper[4949]: I0216 11:28:09.816504 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q7s92\" (UniqueName: \"kubernetes.io/projected/1241685d-968e-4259-8d7e-f27c55e1fb99-kube-api-access-q7s92\") pod \"dnsmasq-dns-57d769cc4f-wkwb6\" (UID: \"1241685d-968e-4259-8d7e-f27c55e1fb99\") " pod="openstack/dnsmasq-dns-57d769cc4f-wkwb6" Feb 16 11:28:09 crc kubenswrapper[4949]: I0216 11:28:09.816583 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1241685d-968e-4259-8d7e-f27c55e1fb99-config\") pod \"dnsmasq-dns-57d769cc4f-wkwb6\" (UID: \"1241685d-968e-4259-8d7e-f27c55e1fb99\") " pod="openstack/dnsmasq-dns-57d769cc4f-wkwb6" Feb 16 11:28:09 crc kubenswrapper[4949]: I0216 11:28:09.816639 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1241685d-968e-4259-8d7e-f27c55e1fb99-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-wkwb6\" (UID: \"1241685d-968e-4259-8d7e-f27c55e1fb99\") " pod="openstack/dnsmasq-dns-57d769cc4f-wkwb6" Feb 16 11:28:09 crc kubenswrapper[4949]: I0216 11:28:09.925022 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q7s92\" (UniqueName: \"kubernetes.io/projected/1241685d-968e-4259-8d7e-f27c55e1fb99-kube-api-access-q7s92\") pod \"dnsmasq-dns-57d769cc4f-wkwb6\" (UID: \"1241685d-968e-4259-8d7e-f27c55e1fb99\") " pod="openstack/dnsmasq-dns-57d769cc4f-wkwb6" Feb 16 11:28:09 crc kubenswrapper[4949]: I0216 11:28:09.925093 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1241685d-968e-4259-8d7e-f27c55e1fb99-config\") pod \"dnsmasq-dns-57d769cc4f-wkwb6\" (UID: \"1241685d-968e-4259-8d7e-f27c55e1fb99\") " pod="openstack/dnsmasq-dns-57d769cc4f-wkwb6" Feb 16 11:28:09 crc kubenswrapper[4949]: I0216 11:28:09.925159 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1241685d-968e-4259-8d7e-f27c55e1fb99-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-wkwb6\" (UID: \"1241685d-968e-4259-8d7e-f27c55e1fb99\") " pod="openstack/dnsmasq-dns-57d769cc4f-wkwb6" Feb 16 11:28:09 crc kubenswrapper[4949]: I0216 11:28:09.926890 4949 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1241685d-968e-4259-8d7e-f27c55e1fb99-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-wkwb6\" (UID: \"1241685d-968e-4259-8d7e-f27c55e1fb99\") " pod="openstack/dnsmasq-dns-57d769cc4f-wkwb6" Feb 16 11:28:09 crc kubenswrapper[4949]: I0216 11:28:09.928098 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1241685d-968e-4259-8d7e-f27c55e1fb99-config\") pod \"dnsmasq-dns-57d769cc4f-wkwb6\" (UID: \"1241685d-968e-4259-8d7e-f27c55e1fb99\") " pod="openstack/dnsmasq-dns-57d769cc4f-wkwb6" Feb 16 11:28:09 crc kubenswrapper[4949]: I0216 11:28:09.969307 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q7s92\" (UniqueName: \"kubernetes.io/projected/1241685d-968e-4259-8d7e-f27c55e1fb99-kube-api-access-q7s92\") pod \"dnsmasq-dns-57d769cc4f-wkwb6\" (UID: \"1241685d-968e-4259-8d7e-f27c55e1fb99\") " pod="openstack/dnsmasq-dns-57d769cc4f-wkwb6" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.029392 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-wkwb6" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.210830 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.212492 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.216998 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.220364 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.220709 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.221364 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.221693 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-stkt7" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.221900 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.223805 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.251134 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.277736 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-1"] Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.280955 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-1" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.291966 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-2"] Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.294819 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-2" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.332550 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-1"] Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.336220 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/2b4e8478-eec0-499f-a824-b0f07355e4f6-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"2b4e8478-eec0-499f-a824-b0f07355e4f6\") " pod="openstack/rabbitmq-server-0" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.341709 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pxqff\" (UniqueName: \"kubernetes.io/projected/2b4e8478-eec0-499f-a824-b0f07355e4f6-kube-api-access-pxqff\") pod \"rabbitmq-server-0\" (UID: \"2b4e8478-eec0-499f-a824-b0f07355e4f6\") " pod="openstack/rabbitmq-server-0" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.341910 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/2b4e8478-eec0-499f-a824-b0f07355e4f6-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"2b4e8478-eec0-499f-a824-b0f07355e4f6\") " pod="openstack/rabbitmq-server-0" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.344305 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-2"] Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.346627 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/2b4e8478-eec0-499f-a824-b0f07355e4f6-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"2b4e8478-eec0-499f-a824-b0f07355e4f6\") " pod="openstack/rabbitmq-server-0" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.347562 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/2b4e8478-eec0-499f-a824-b0f07355e4f6-server-conf\") pod \"rabbitmq-server-0\" (UID: \"2b4e8478-eec0-499f-a824-b0f07355e4f6\") " pod="openstack/rabbitmq-server-0" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.347837 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/2b4e8478-eec0-499f-a824-b0f07355e4f6-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"2b4e8478-eec0-499f-a824-b0f07355e4f6\") " pod="openstack/rabbitmq-server-0" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.348120 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-488ac636-798e-4e54-8b90-72d85ab2610f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-488ac636-798e-4e54-8b90-72d85ab2610f\") pod \"rabbitmq-server-0\" (UID: \"2b4e8478-eec0-499f-a824-b0f07355e4f6\") " pod="openstack/rabbitmq-server-0" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.348517 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2b4e8478-eec0-499f-a824-b0f07355e4f6-config-data\") pod \"rabbitmq-server-0\" (UID: \"2b4e8478-eec0-499f-a824-b0f07355e4f6\") " pod="openstack/rabbitmq-server-0" Feb 16 11:28:10 crc kubenswrapper[4949]: 
I0216 11:28:10.348578 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/2b4e8478-eec0-499f-a824-b0f07355e4f6-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"2b4e8478-eec0-499f-a824-b0f07355e4f6\") " pod="openstack/rabbitmq-server-0" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.348640 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/2b4e8478-eec0-499f-a824-b0f07355e4f6-pod-info\") pod \"rabbitmq-server-0\" (UID: \"2b4e8478-eec0-499f-a824-b0f07355e4f6\") " pod="openstack/rabbitmq-server-0" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.348708 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/2b4e8478-eec0-499f-a824-b0f07355e4f6-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"2b4e8478-eec0-499f-a824-b0f07355e4f6\") " pod="openstack/rabbitmq-server-0" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.418025 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-qvg6k"] Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.452816 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pxqff\" (UniqueName: \"kubernetes.io/projected/2b4e8478-eec0-499f-a824-b0f07355e4f6-kube-api-access-pxqff\") pod \"rabbitmq-server-0\" (UID: \"2b4e8478-eec0-499f-a824-b0f07355e4f6\") " pod="openstack/rabbitmq-server-0" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.453288 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/2b4e8478-eec0-499f-a824-b0f07355e4f6-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"2b4e8478-eec0-499f-a824-b0f07355e4f6\") " pod="openstack/rabbitmq-server-0" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.453354 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-2\" (UID: \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\") " pod="openstack/rabbitmq-server-2" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.453379 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-pod-info\") pod \"rabbitmq-server-2\" (UID: \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\") " pod="openstack/rabbitmq-server-2" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.453426 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-server-conf\") pod \"rabbitmq-server-2\" (UID: \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\") " pod="openstack/rabbitmq-server-2" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.453450 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-config-data\") pod \"rabbitmq-server-2\" (UID: \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\") " 
pod="openstack/rabbitmq-server-2" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.453496 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2fd90353-44d0-4269-84cc-f90c10eb6da4-config-data\") pod \"rabbitmq-server-1\" (UID: \"2fd90353-44d0-4269-84cc-f90c10eb6da4\") " pod="openstack/rabbitmq-server-1" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.453527 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/2b4e8478-eec0-499f-a824-b0f07355e4f6-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"2b4e8478-eec0-499f-a824-b0f07355e4f6\") " pod="openstack/rabbitmq-server-0" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.453550 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-rabbitmq-tls\") pod \"rabbitmq-server-2\" (UID: \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\") " pod="openstack/rabbitmq-server-2" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.453607 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-erlang-cookie-secret\") pod \"rabbitmq-server-2\" (UID: \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\") " pod="openstack/rabbitmq-server-2" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.453665 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-5a5823a0-ef3a-488c-abcb-c6baba529771\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5a5823a0-ef3a-488c-abcb-c6baba529771\") pod \"rabbitmq-server-1\" (UID: \"2fd90353-44d0-4269-84cc-f90c10eb6da4\") " pod="openstack/rabbitmq-server-1" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.453711 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/2fd90353-44d0-4269-84cc-f90c10eb6da4-rabbitmq-confd\") pod \"rabbitmq-server-1\" (UID: \"2fd90353-44d0-4269-84cc-f90c10eb6da4\") " pod="openstack/rabbitmq-server-1" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.453828 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/2fd90353-44d0-4269-84cc-f90c10eb6da4-erlang-cookie-secret\") pod \"rabbitmq-server-1\" (UID: \"2fd90353-44d0-4269-84cc-f90c10eb6da4\") " pod="openstack/rabbitmq-server-1" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.454034 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/2b4e8478-eec0-499f-a824-b0f07355e4f6-server-conf\") pod \"rabbitmq-server-0\" (UID: \"2b4e8478-eec0-499f-a824-b0f07355e4f6\") " pod="openstack/rabbitmq-server-0" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.454059 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/2fd90353-44d0-4269-84cc-f90c10eb6da4-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-1\" (UID: \"2fd90353-44d0-4269-84cc-f90c10eb6da4\") " pod="openstack/rabbitmq-server-1" Feb 16 
11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.454101 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/2fd90353-44d0-4269-84cc-f90c10eb6da4-rabbitmq-plugins\") pod \"rabbitmq-server-1\" (UID: \"2fd90353-44d0-4269-84cc-f90c10eb6da4\") " pod="openstack/rabbitmq-server-1" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.454127 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/2b4e8478-eec0-499f-a824-b0f07355e4f6-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"2b4e8478-eec0-499f-a824-b0f07355e4f6\") " pod="openstack/rabbitmq-server-0" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.454186 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-90830775-583e-4ebc-b837-12928d277c58\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-90830775-583e-4ebc-b837-12928d277c58\") pod \"rabbitmq-server-2\" (UID: \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\") " pod="openstack/rabbitmq-server-2" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.454248 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-plugins-conf\") pod \"rabbitmq-server-2\" (UID: \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\") " pod="openstack/rabbitmq-server-2" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.454266 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/2fd90353-44d0-4269-84cc-f90c10eb6da4-server-conf\") pod \"rabbitmq-server-1\" (UID: \"2fd90353-44d0-4269-84cc-f90c10eb6da4\") " pod="openstack/rabbitmq-server-1" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.454288 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-488ac636-798e-4e54-8b90-72d85ab2610f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-488ac636-798e-4e54-8b90-72d85ab2610f\") pod \"rabbitmq-server-0\" (UID: \"2b4e8478-eec0-499f-a824-b0f07355e4f6\") " pod="openstack/rabbitmq-server-0" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.454338 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2b4e8478-eec0-499f-a824-b0f07355e4f6-config-data\") pod \"rabbitmq-server-0\" (UID: \"2b4e8478-eec0-499f-a824-b0f07355e4f6\") " pod="openstack/rabbitmq-server-0" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.454363 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/2b4e8478-eec0-499f-a824-b0f07355e4f6-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"2b4e8478-eec0-499f-a824-b0f07355e4f6\") " pod="openstack/rabbitmq-server-0" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.454382 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-rabbitmq-confd\") pod \"rabbitmq-server-2\" (UID: \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\") " pod="openstack/rabbitmq-server-2" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.454441 4949 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/2fd90353-44d0-4269-84cc-f90c10eb6da4-pod-info\") pod \"rabbitmq-server-1\" (UID: \"2fd90353-44d0-4269-84cc-f90c10eb6da4\") " pod="openstack/rabbitmq-server-1" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.454535 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/2b4e8478-eec0-499f-a824-b0f07355e4f6-pod-info\") pod \"rabbitmq-server-0\" (UID: \"2b4e8478-eec0-499f-a824-b0f07355e4f6\") " pod="openstack/rabbitmq-server-0" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.454609 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/2b4e8478-eec0-499f-a824-b0f07355e4f6-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"2b4e8478-eec0-499f-a824-b0f07355e4f6\") " pod="openstack/rabbitmq-server-0" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.454630 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l9xqh\" (UniqueName: \"kubernetes.io/projected/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-kube-api-access-l9xqh\") pod \"rabbitmq-server-2\" (UID: \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\") " pod="openstack/rabbitmq-server-2" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.454678 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/2fd90353-44d0-4269-84cc-f90c10eb6da4-plugins-conf\") pod \"rabbitmq-server-1\" (UID: \"2fd90353-44d0-4269-84cc-f90c10eb6da4\") " pod="openstack/rabbitmq-server-1" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.454711 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2wzds\" (UniqueName: \"kubernetes.io/projected/2fd90353-44d0-4269-84cc-f90c10eb6da4-kube-api-access-2wzds\") pod \"rabbitmq-server-1\" (UID: \"2fd90353-44d0-4269-84cc-f90c10eb6da4\") " pod="openstack/rabbitmq-server-1" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.454731 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/2fd90353-44d0-4269-84cc-f90c10eb6da4-rabbitmq-tls\") pod \"rabbitmq-server-1\" (UID: \"2fd90353-44d0-4269-84cc-f90c10eb6da4\") " pod="openstack/rabbitmq-server-1" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.454764 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/2b4e8478-eec0-499f-a824-b0f07355e4f6-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"2b4e8478-eec0-499f-a824-b0f07355e4f6\") " pod="openstack/rabbitmq-server-0" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.454786 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-rabbitmq-plugins\") pod \"rabbitmq-server-2\" (UID: \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\") " pod="openstack/rabbitmq-server-2" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.454876 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: 
\"kubernetes.io/empty-dir/2b4e8478-eec0-499f-a824-b0f07355e4f6-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"2b4e8478-eec0-499f-a824-b0f07355e4f6\") " pod="openstack/rabbitmq-server-0" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.455477 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/2b4e8478-eec0-499f-a824-b0f07355e4f6-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"2b4e8478-eec0-499f-a824-b0f07355e4f6\") " pod="openstack/rabbitmq-server-0" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.456523 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2b4e8478-eec0-499f-a824-b0f07355e4f6-config-data\") pod \"rabbitmq-server-0\" (UID: \"2b4e8478-eec0-499f-a824-b0f07355e4f6\") " pod="openstack/rabbitmq-server-0" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.460225 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/2b4e8478-eec0-499f-a824-b0f07355e4f6-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"2b4e8478-eec0-499f-a824-b0f07355e4f6\") " pod="openstack/rabbitmq-server-0" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.465055 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/2b4e8478-eec0-499f-a824-b0f07355e4f6-server-conf\") pod \"rabbitmq-server-0\" (UID: \"2b4e8478-eec0-499f-a824-b0f07355e4f6\") " pod="openstack/rabbitmq-server-0" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.470146 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/2b4e8478-eec0-499f-a824-b0f07355e4f6-pod-info\") pod \"rabbitmq-server-0\" (UID: \"2b4e8478-eec0-499f-a824-b0f07355e4f6\") " pod="openstack/rabbitmq-server-0" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.474306 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/2b4e8478-eec0-499f-a824-b0f07355e4f6-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"2b4e8478-eec0-499f-a824-b0f07355e4f6\") " pod="openstack/rabbitmq-server-0" Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.474448 4949 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.474491 4949 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-488ac636-798e-4e54-8b90-72d85ab2610f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-488ac636-798e-4e54-8b90-72d85ab2610f\") pod \"rabbitmq-server-0\" (UID: \"2b4e8478-eec0-499f-a824-b0f07355e4f6\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1ff64a0fd9ac55a5bb2ee7ea71b4291dabca12fad6bff89052d3968762dcebbb/globalmount\"" pod="openstack/rabbitmq-server-0"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.475060 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/2b4e8478-eec0-499f-a824-b0f07355e4f6-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"2b4e8478-eec0-499f-a824-b0f07355e4f6\") " pod="openstack/rabbitmq-server-0"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.479686 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/2b4e8478-eec0-499f-a824-b0f07355e4f6-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"2b4e8478-eec0-499f-a824-b0f07355e4f6\") " pod="openstack/rabbitmq-server-0"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.483950 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pxqff\" (UniqueName: \"kubernetes.io/projected/2b4e8478-eec0-499f-a824-b0f07355e4f6-kube-api-access-pxqff\") pod \"rabbitmq-server-0\" (UID: \"2b4e8478-eec0-499f-a824-b0f07355e4f6\") " pod="openstack/rabbitmq-server-0"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.567205 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l9xqh\" (UniqueName: \"kubernetes.io/projected/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-kube-api-access-l9xqh\") pod \"rabbitmq-server-2\" (UID: \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\") " pod="openstack/rabbitmq-server-2"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.567292 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/2fd90353-44d0-4269-84cc-f90c10eb6da4-plugins-conf\") pod \"rabbitmq-server-1\" (UID: \"2fd90353-44d0-4269-84cc-f90c10eb6da4\") " pod="openstack/rabbitmq-server-1"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.567334 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2wzds\" (UniqueName: \"kubernetes.io/projected/2fd90353-44d0-4269-84cc-f90c10eb6da4-kube-api-access-2wzds\") pod \"rabbitmq-server-1\" (UID: \"2fd90353-44d0-4269-84cc-f90c10eb6da4\") " pod="openstack/rabbitmq-server-1"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.567360 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/2fd90353-44d0-4269-84cc-f90c10eb6da4-rabbitmq-tls\") pod \"rabbitmq-server-1\" (UID: \"2fd90353-44d0-4269-84cc-f90c10eb6da4\") " pod="openstack/rabbitmq-server-1"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.567384 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-rabbitmq-plugins\") pod \"rabbitmq-server-2\" (UID: \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\") " pod="openstack/rabbitmq-server-2"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.567432 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-2\" (UID: \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\") " pod="openstack/rabbitmq-server-2"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.567500 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-pod-info\") pod \"rabbitmq-server-2\" (UID: \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\") " pod="openstack/rabbitmq-server-2"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.567523 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-server-conf\") pod \"rabbitmq-server-2\" (UID: \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\") " pod="openstack/rabbitmq-server-2"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.567559 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-config-data\") pod \"rabbitmq-server-2\" (UID: \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\") " pod="openstack/rabbitmq-server-2"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.567581 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2fd90353-44d0-4269-84cc-f90c10eb6da4-config-data\") pod \"rabbitmq-server-1\" (UID: \"2fd90353-44d0-4269-84cc-f90c10eb6da4\") " pod="openstack/rabbitmq-server-1"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.567620 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-rabbitmq-tls\") pod \"rabbitmq-server-2\" (UID: \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\") " pod="openstack/rabbitmq-server-2"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.567646 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-erlang-cookie-secret\") pod \"rabbitmq-server-2\" (UID: \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\") " pod="openstack/rabbitmq-server-2"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.567684 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-5a5823a0-ef3a-488c-abcb-c6baba529771\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5a5823a0-ef3a-488c-abcb-c6baba529771\") pod \"rabbitmq-server-1\" (UID: \"2fd90353-44d0-4269-84cc-f90c10eb6da4\") " pod="openstack/rabbitmq-server-1"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.567735 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/2fd90353-44d0-4269-84cc-f90c10eb6da4-rabbitmq-confd\") pod \"rabbitmq-server-1\" (UID: \"2fd90353-44d0-4269-84cc-f90c10eb6da4\") " pod="openstack/rabbitmq-server-1"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.567759 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/2fd90353-44d0-4269-84cc-f90c10eb6da4-erlang-cookie-secret\") pod \"rabbitmq-server-1\" (UID: \"2fd90353-44d0-4269-84cc-f90c10eb6da4\") " pod="openstack/rabbitmq-server-1"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.567833 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/2fd90353-44d0-4269-84cc-f90c10eb6da4-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-1\" (UID: \"2fd90353-44d0-4269-84cc-f90c10eb6da4\") " pod="openstack/rabbitmq-server-1"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.567864 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/2fd90353-44d0-4269-84cc-f90c10eb6da4-rabbitmq-plugins\") pod \"rabbitmq-server-1\" (UID: \"2fd90353-44d0-4269-84cc-f90c10eb6da4\") " pod="openstack/rabbitmq-server-1"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.567893 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-90830775-583e-4ebc-b837-12928d277c58\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-90830775-583e-4ebc-b837-12928d277c58\") pod \"rabbitmq-server-2\" (UID: \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\") " pod="openstack/rabbitmq-server-2"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.567934 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-plugins-conf\") pod \"rabbitmq-server-2\" (UID: \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\") " pod="openstack/rabbitmq-server-2"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.568009 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/2fd90353-44d0-4269-84cc-f90c10eb6da4-server-conf\") pod \"rabbitmq-server-1\" (UID: \"2fd90353-44d0-4269-84cc-f90c10eb6da4\") " pod="openstack/rabbitmq-server-1"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.568064 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-rabbitmq-confd\") pod \"rabbitmq-server-2\" (UID: \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\") " pod="openstack/rabbitmq-server-2"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.568092 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/2fd90353-44d0-4269-84cc-f90c10eb6da4-pod-info\") pod \"rabbitmq-server-1\" (UID: \"2fd90353-44d0-4269-84cc-f90c10eb6da4\") " pod="openstack/rabbitmq-server-1"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.572590 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/2fd90353-44d0-4269-84cc-f90c10eb6da4-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-1\" (UID: \"2fd90353-44d0-4269-84cc-f90c10eb6da4\") " pod="openstack/rabbitmq-server-1"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.579039 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/2fd90353-44d0-4269-84cc-f90c10eb6da4-plugins-conf\") pod \"rabbitmq-server-1\" (UID: \"2fd90353-44d0-4269-84cc-f90c10eb6da4\") " pod="openstack/rabbitmq-server-1"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.583773 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-plugins-conf\") pod \"rabbitmq-server-2\" (UID: \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\") " pod="openstack/rabbitmq-server-2"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.584063 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/2fd90353-44d0-4269-84cc-f90c10eb6da4-rabbitmq-plugins\") pod \"rabbitmq-server-1\" (UID: \"2fd90353-44d0-4269-84cc-f90c10eb6da4\") " pod="openstack/rabbitmq-server-1"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.584281 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-rabbitmq-plugins\") pod \"rabbitmq-server-2\" (UID: \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\") " pod="openstack/rabbitmq-server-2"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.585242 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-config-data\") pod \"rabbitmq-server-2\" (UID: \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\") " pod="openstack/rabbitmq-server-2"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.586113 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/2fd90353-44d0-4269-84cc-f90c10eb6da4-server-conf\") pod \"rabbitmq-server-1\" (UID: \"2fd90353-44d0-4269-84cc-f90c10eb6da4\") " pod="openstack/rabbitmq-server-1"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.587405 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2fd90353-44d0-4269-84cc-f90c10eb6da4-config-data\") pod \"rabbitmq-server-1\" (UID: \"2fd90353-44d0-4269-84cc-f90c10eb6da4\") " pod="openstack/rabbitmq-server-1"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.589637 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/2fd90353-44d0-4269-84cc-f90c10eb6da4-rabbitmq-confd\") pod \"rabbitmq-server-1\" (UID: \"2fd90353-44d0-4269-84cc-f90c10eb6da4\") " pod="openstack/rabbitmq-server-1"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.590103 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/2fd90353-44d0-4269-84cc-f90c10eb6da4-pod-info\") pod \"rabbitmq-server-1\" (UID: \"2fd90353-44d0-4269-84cc-f90c10eb6da4\") " pod="openstack/rabbitmq-server-1"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.591342 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-server-conf\") pod \"rabbitmq-server-2\" (UID: \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\") " pod="openstack/rabbitmq-server-2"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.592465 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-2\" (UID: \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\") " pod="openstack/rabbitmq-server-2"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.592768 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-erlang-cookie-secret\") pod \"rabbitmq-server-2\" (UID: \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\") " pod="openstack/rabbitmq-server-2"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.593651 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-rabbitmq-confd\") pod \"rabbitmq-server-2\" (UID: \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\") " pod="openstack/rabbitmq-server-2"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.595851 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-rabbitmq-tls\") pod \"rabbitmq-server-2\" (UID: \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\") " pod="openstack/rabbitmq-server-2"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.596434 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/2fd90353-44d0-4269-84cc-f90c10eb6da4-erlang-cookie-secret\") pod \"rabbitmq-server-1\" (UID: \"2fd90353-44d0-4269-84cc-f90c10eb6da4\") " pod="openstack/rabbitmq-server-1"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.606576 4949 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.606624 4949 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-5a5823a0-ef3a-488c-abcb-c6baba529771\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5a5823a0-ef3a-488c-abcb-c6baba529771\") pod \"rabbitmq-server-1\" (UID: \"2fd90353-44d0-4269-84cc-f90c10eb6da4\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/58db376e1c9e37d07cbb4afde9259fa1e392f418eb7d5b83a633bbc03bd8d1d4/globalmount\"" pod="openstack/rabbitmq-server-1"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.608276 4949 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.608324 4949 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-90830775-583e-4ebc-b837-12928d277c58\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-90830775-583e-4ebc-b837-12928d277c58\") pod \"rabbitmq-server-2\" (UID: \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/109184ac86226b22c0d6666a38f57f93574572ac48e48f16154f0acaca50e7d1/globalmount\"" pod="openstack/rabbitmq-server-2"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.612528 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-pod-info\") pod \"rabbitmq-server-2\" (UID: \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\") " pod="openstack/rabbitmq-server-2"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.612658 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2wzds\" (UniqueName: \"kubernetes.io/projected/2fd90353-44d0-4269-84cc-f90c10eb6da4-kube-api-access-2wzds\") pod \"rabbitmq-server-1\" (UID: \"2fd90353-44d0-4269-84cc-f90c10eb6da4\") " pod="openstack/rabbitmq-server-1"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.623959 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l9xqh\" (UniqueName: \"kubernetes.io/projected/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-kube-api-access-l9xqh\") pod \"rabbitmq-server-2\" (UID: \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\") " pod="openstack/rabbitmq-server-2"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.627928 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/2fd90353-44d0-4269-84cc-f90c10eb6da4-rabbitmq-tls\") pod \"rabbitmq-server-1\" (UID: \"2fd90353-44d0-4269-84cc-f90c10eb6da4\") " pod="openstack/rabbitmq-server-1"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.699003 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-488ac636-798e-4e54-8b90-72d85ab2610f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-488ac636-798e-4e54-8b90-72d85ab2610f\") pod \"rabbitmq-server-0\" (UID: \"2b4e8478-eec0-499f-a824-b0f07355e4f6\") " pod="openstack/rabbitmq-server-0"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.704672 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-5a5823a0-ef3a-488c-abcb-c6baba529771\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5a5823a0-ef3a-488c-abcb-c6baba529771\") pod \"rabbitmq-server-1\" (UID: \"2fd90353-44d0-4269-84cc-f90c10eb6da4\") " pod="openstack/rabbitmq-server-1"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.735406 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-90830775-583e-4ebc-b837-12928d277c58\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-90830775-583e-4ebc-b837-12928d277c58\") pod \"rabbitmq-server-2\" (UID: \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\") " pod="openstack/rabbitmq-server-2"
Feb 16 11:28:10 crc kubenswrapper[4949]: W0216 11:28:10.757390 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1241685d_968e_4259_8d7e_f27c55e1fb99.slice/crio-083a05a0f269c758953ec9c4ec64ea9ac4d951a16604a77925f11ba26dd87657 WatchSource:0}: Error finding container 083a05a0f269c758953ec9c4ec64ea9ac4d951a16604a77925f11ba26dd87657: Status 404 returned error can't find the container with id 083a05a0f269c758953ec9c4ec64ea9ac4d951a16604a77925f11ba26dd87657
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.758097 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-wkwb6"]
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.781918 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.784041 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.789291 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.790506 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.790744 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.790962 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.791243 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-mffhw"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.791603 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.791874 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.799200 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.871287 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.894101 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/0f715146-edc4-4f1f-80e3-f134d9833f47-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f715146-edc4-4f1f-80e3-f134d9833f47\") " pod="openstack/rabbitmq-cell1-server-0"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.894232 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/0f715146-edc4-4f1f-80e3-f134d9833f47-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f715146-edc4-4f1f-80e3-f134d9833f47\") " pod="openstack/rabbitmq-cell1-server-0"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.894267 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/0f715146-edc4-4f1f-80e3-f134d9833f47-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f715146-edc4-4f1f-80e3-f134d9833f47\") " pod="openstack/rabbitmq-cell1-server-0"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.894367 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/0f715146-edc4-4f1f-80e3-f134d9833f47-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f715146-edc4-4f1f-80e3-f134d9833f47\") " pod="openstack/rabbitmq-cell1-server-0"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.894405 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-6f3cf97a-fe2e-4a88-bca4-7f49ec95d9a9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6f3cf97a-fe2e-4a88-bca4-7f49ec95d9a9\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f715146-edc4-4f1f-80e3-f134d9833f47\") " pod="openstack/rabbitmq-cell1-server-0"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.894434 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k6l5t\" (UniqueName: \"kubernetes.io/projected/0f715146-edc4-4f1f-80e3-f134d9833f47-kube-api-access-k6l5t\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f715146-edc4-4f1f-80e3-f134d9833f47\") " pod="openstack/rabbitmq-cell1-server-0"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.894501 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/0f715146-edc4-4f1f-80e3-f134d9833f47-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f715146-edc4-4f1f-80e3-f134d9833f47\") " pod="openstack/rabbitmq-cell1-server-0"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.894763 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/0f715146-edc4-4f1f-80e3-f134d9833f47-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f715146-edc4-4f1f-80e3-f134d9833f47\") " pod="openstack/rabbitmq-cell1-server-0"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.894887 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/0f715146-edc4-4f1f-80e3-f134d9833f47-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f715146-edc4-4f1f-80e3-f134d9833f47\") " pod="openstack/rabbitmq-cell1-server-0"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.894938 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/0f715146-edc4-4f1f-80e3-f134d9833f47-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f715146-edc4-4f1f-80e3-f134d9833f47\") " pod="openstack/rabbitmq-cell1-server-0"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.894962 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0f715146-edc4-4f1f-80e3-f134d9833f47-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f715146-edc4-4f1f-80e3-f134d9833f47\") " pod="openstack/rabbitmq-cell1-server-0"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.940813 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-1"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.997905 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0f715146-edc4-4f1f-80e3-f134d9833f47-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f715146-edc4-4f1f-80e3-f134d9833f47\") " pod="openstack/rabbitmq-cell1-server-0"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.997971 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/0f715146-edc4-4f1f-80e3-f134d9833f47-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f715146-edc4-4f1f-80e3-f134d9833f47\") " pod="openstack/rabbitmq-cell1-server-0"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.998029 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/0f715146-edc4-4f1f-80e3-f134d9833f47-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f715146-edc4-4f1f-80e3-f134d9833f47\") " pod="openstack/rabbitmq-cell1-server-0"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.998053 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/0f715146-edc4-4f1f-80e3-f134d9833f47-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f715146-edc4-4f1f-80e3-f134d9833f47\") " pod="openstack/rabbitmq-cell1-server-0"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.998130 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/0f715146-edc4-4f1f-80e3-f134d9833f47-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f715146-edc4-4f1f-80e3-f134d9833f47\") " pod="openstack/rabbitmq-cell1-server-0"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.998157 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-6f3cf97a-fe2e-4a88-bca4-7f49ec95d9a9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6f3cf97a-fe2e-4a88-bca4-7f49ec95d9a9\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f715146-edc4-4f1f-80e3-f134d9833f47\") " pod="openstack/rabbitmq-cell1-server-0"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.998186 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k6l5t\" (UniqueName: \"kubernetes.io/projected/0f715146-edc4-4f1f-80e3-f134d9833f47-kube-api-access-k6l5t\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f715146-edc4-4f1f-80e3-f134d9833f47\") " pod="openstack/rabbitmq-cell1-server-0"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.998213 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/0f715146-edc4-4f1f-80e3-f134d9833f47-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f715146-edc4-4f1f-80e3-f134d9833f47\") " pod="openstack/rabbitmq-cell1-server-0"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.998257 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/0f715146-edc4-4f1f-80e3-f134d9833f47-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f715146-edc4-4f1f-80e3-f134d9833f47\") " pod="openstack/rabbitmq-cell1-server-0"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.998295 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/0f715146-edc4-4f1f-80e3-f134d9833f47-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f715146-edc4-4f1f-80e3-f134d9833f47\") " pod="openstack/rabbitmq-cell1-server-0"
Feb 16 11:28:10 crc kubenswrapper[4949]: I0216 11:28:10.998323 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/0f715146-edc4-4f1f-80e3-f134d9833f47-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f715146-edc4-4f1f-80e3-f134d9833f47\") " pod="openstack/rabbitmq-cell1-server-0"
Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:10.999511 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/0f715146-edc4-4f1f-80e3-f134d9833f47-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f715146-edc4-4f1f-80e3-f134d9833f47\") " pod="openstack/rabbitmq-cell1-server-0"
Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:10.999664 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/0f715146-edc4-4f1f-80e3-f134d9833f47-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f715146-edc4-4f1f-80e3-f134d9833f47\") " pod="openstack/rabbitmq-cell1-server-0"
Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:10.999835 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0f715146-edc4-4f1f-80e3-f134d9833f47-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f715146-edc4-4f1f-80e3-f134d9833f47\") " pod="openstack/rabbitmq-cell1-server-0"
Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:11.000791 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/0f715146-edc4-4f1f-80e3-f134d9833f47-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f715146-edc4-4f1f-80e3-f134d9833f47\") " pod="openstack/rabbitmq-cell1-server-0"
Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:11.001987 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName:
\"kubernetes.io/configmap/0f715146-edc4-4f1f-80e3-f134d9833f47-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f715146-edc4-4f1f-80e3-f134d9833f47\") " pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:11.012065 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/0f715146-edc4-4f1f-80e3-f134d9833f47-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f715146-edc4-4f1f-80e3-f134d9833f47\") " pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:11.018070 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-2" Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:11.018606 4949 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:11.018606 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/0f715146-edc4-4f1f-80e3-f134d9833f47-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f715146-edc4-4f1f-80e3-f134d9833f47\") " pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:11.018640 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/0f715146-edc4-4f1f-80e3-f134d9833f47-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f715146-edc4-4f1f-80e3-f134d9833f47\") " pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:11.018651 4949 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-6f3cf97a-fe2e-4a88-bca4-7f49ec95d9a9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6f3cf97a-fe2e-4a88-bca4-7f49ec95d9a9\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f715146-edc4-4f1f-80e3-f134d9833f47\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/f1fa8d0095524c016fc22ddccb6ce776ebb58da8e1711e3d6932ae5bed958a29/globalmount\"" pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:11.018833 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/0f715146-edc4-4f1f-80e3-f134d9833f47-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f715146-edc4-4f1f-80e3-f134d9833f47\") " pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:11.023222 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k6l5t\" (UniqueName: \"kubernetes.io/projected/0f715146-edc4-4f1f-80e3-f134d9833f47-kube-api-access-k6l5t\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f715146-edc4-4f1f-80e3-f134d9833f47\") " pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:11.070735 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-6f3cf97a-fe2e-4a88-bca4-7f49ec95d9a9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6f3cf97a-fe2e-4a88-bca4-7f49ec95d9a9\") pod \"rabbitmq-cell1-server-0\" (UID: \"0f715146-edc4-4f1f-80e3-f134d9833f47\") " pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:11.115650 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:11.385549 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-wkwb6" event={"ID":"1241685d-968e-4259-8d7e-f27c55e1fb99","Type":"ContainerStarted","Data":"083a05a0f269c758953ec9c4ec64ea9ac4d951a16604a77925f11ba26dd87657"} Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:11.388675 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-qvg6k" event={"ID":"2877518f-206f-4886-88c6-22754e8c2556","Type":"ContainerStarted","Data":"d3bd64e52e2735a7ecfe858405b944f79cd9294120f16d5ccb9ec74e60c6fcea"} Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:11.529446 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Feb 16 11:28:11 crc kubenswrapper[4949]: W0216 11:28:11.536968 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2b4e8478_eec0_499f_a824_b0f07355e4f6.slice/crio-e5b8549583bda9f42a5052692f24aa6850f0788c2b274beb42bd5b481e323e2b WatchSource:0}: Error finding container e5b8549583bda9f42a5052692f24aa6850f0788c2b274beb42bd5b481e323e2b: Status 404 returned error can't find the container with id e5b8549583bda9f42a5052692f24aa6850f0788c2b274beb42bd5b481e323e2b Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:11.638258 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:11.642203 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:11.654071 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:11.654446 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:11.655030 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:11.655208 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-g4d62" Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:11.662544 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:11.667852 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:11.717269 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f8511a9d-0c08-43c9-9243-f340d75fabe1-operator-scripts\") pod \"openstack-galera-0\" (UID: \"f8511a9d-0c08-43c9-9243-f340d75fabe1\") " pod="openstack/openstack-galera-0" Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:11.717332 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8511a9d-0c08-43c9-9243-f340d75fabe1-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"f8511a9d-0c08-43c9-9243-f340d75fabe1\") " pod="openstack/openstack-galera-0" Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 
11:28:11.717419 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/f8511a9d-0c08-43c9-9243-f340d75fabe1-config-data-generated\") pod \"openstack-galera-0\" (UID: \"f8511a9d-0c08-43c9-9243-f340d75fabe1\") " pod="openstack/openstack-galera-0" Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:11.717452 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/f8511a9d-0c08-43c9-9243-f340d75fabe1-kolla-config\") pod \"openstack-galera-0\" (UID: \"f8511a9d-0c08-43c9-9243-f340d75fabe1\") " pod="openstack/openstack-galera-0" Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:11.717493 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-psrjk\" (UniqueName: \"kubernetes.io/projected/f8511a9d-0c08-43c9-9243-f340d75fabe1-kube-api-access-psrjk\") pod \"openstack-galera-0\" (UID: \"f8511a9d-0c08-43c9-9243-f340d75fabe1\") " pod="openstack/openstack-galera-0" Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:11.717520 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/f8511a9d-0c08-43c9-9243-f340d75fabe1-config-data-default\") pod \"openstack-galera-0\" (UID: \"f8511a9d-0c08-43c9-9243-f340d75fabe1\") " pod="openstack/openstack-galera-0" Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:11.717569 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/f8511a9d-0c08-43c9-9243-f340d75fabe1-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"f8511a9d-0c08-43c9-9243-f340d75fabe1\") " pod="openstack/openstack-galera-0" Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:11.717618 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-94a217f4-793a-4771-95e1-fc3728151462\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-94a217f4-793a-4771-95e1-fc3728151462\") pod \"openstack-galera-0\" (UID: \"f8511a9d-0c08-43c9-9243-f340d75fabe1\") " pod="openstack/openstack-galera-0" Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:11.744144 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-2"] Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:11.772274 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-1"] Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:11.821551 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f8511a9d-0c08-43c9-9243-f340d75fabe1-operator-scripts\") pod \"openstack-galera-0\" (UID: \"f8511a9d-0c08-43c9-9243-f340d75fabe1\") " pod="openstack/openstack-galera-0" Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:11.821637 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8511a9d-0c08-43c9-9243-f340d75fabe1-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"f8511a9d-0c08-43c9-9243-f340d75fabe1\") " pod="openstack/openstack-galera-0" Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:11.821794 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/f8511a9d-0c08-43c9-9243-f340d75fabe1-config-data-generated\") pod \"openstack-galera-0\" (UID: \"f8511a9d-0c08-43c9-9243-f340d75fabe1\") " pod="openstack/openstack-galera-0" Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:11.821842 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/f8511a9d-0c08-43c9-9243-f340d75fabe1-kolla-config\") pod \"openstack-galera-0\" (UID: \"f8511a9d-0c08-43c9-9243-f340d75fabe1\") " pod="openstack/openstack-galera-0" Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:11.821939 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-psrjk\" (UniqueName: \"kubernetes.io/projected/f8511a9d-0c08-43c9-9243-f340d75fabe1-kube-api-access-psrjk\") pod \"openstack-galera-0\" (UID: \"f8511a9d-0c08-43c9-9243-f340d75fabe1\") " pod="openstack/openstack-galera-0" Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:11.822002 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/f8511a9d-0c08-43c9-9243-f340d75fabe1-config-data-default\") pod \"openstack-galera-0\" (UID: \"f8511a9d-0c08-43c9-9243-f340d75fabe1\") " pod="openstack/openstack-galera-0" Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:11.822088 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/f8511a9d-0c08-43c9-9243-f340d75fabe1-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"f8511a9d-0c08-43c9-9243-f340d75fabe1\") " pod="openstack/openstack-galera-0" Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:11.822193 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-94a217f4-793a-4771-95e1-fc3728151462\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-94a217f4-793a-4771-95e1-fc3728151462\") pod \"openstack-galera-0\" (UID: \"f8511a9d-0c08-43c9-9243-f340d75fabe1\") " pod="openstack/openstack-galera-0" Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:11.823526 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/f8511a9d-0c08-43c9-9243-f340d75fabe1-kolla-config\") pod \"openstack-galera-0\" (UID: \"f8511a9d-0c08-43c9-9243-f340d75fabe1\") " pod="openstack/openstack-galera-0" Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:11.826424 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/f8511a9d-0c08-43c9-9243-f340d75fabe1-config-data-default\") pod \"openstack-galera-0\" (UID: \"f8511a9d-0c08-43c9-9243-f340d75fabe1\") " pod="openstack/openstack-galera-0" Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:11.830876 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f8511a9d-0c08-43c9-9243-f340d75fabe1-operator-scripts\") pod \"openstack-galera-0\" (UID: \"f8511a9d-0c08-43c9-9243-f340d75fabe1\") " pod="openstack/openstack-galera-0" Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:11.831862 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/f8511a9d-0c08-43c9-9243-f340d75fabe1-config-data-generated\") pod \"openstack-galera-0\" (UID: 
\"f8511a9d-0c08-43c9-9243-f340d75fabe1\") " pod="openstack/openstack-galera-0" Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:11.835456 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8511a9d-0c08-43c9-9243-f340d75fabe1-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"f8511a9d-0c08-43c9-9243-f340d75fabe1\") " pod="openstack/openstack-galera-0" Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:11.836287 4949 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:11.836431 4949 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-94a217f4-793a-4771-95e1-fc3728151462\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-94a217f4-793a-4771-95e1-fc3728151462\") pod \"openstack-galera-0\" (UID: \"f8511a9d-0c08-43c9-9243-f340d75fabe1\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/609e12e98087ed3cadb7889ef58d3ca0dc019ea6c3f8b66c9b93865d78634f24/globalmount\"" pod="openstack/openstack-galera-0" Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:11.854275 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/f8511a9d-0c08-43c9-9243-f340d75fabe1-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"f8511a9d-0c08-43c9-9243-f340d75fabe1\") " pod="openstack/openstack-galera-0" Feb 16 11:28:11 crc kubenswrapper[4949]: I0216 11:28:11.863467 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-psrjk\" (UniqueName: \"kubernetes.io/projected/f8511a9d-0c08-43c9-9243-f340d75fabe1-kube-api-access-psrjk\") pod \"openstack-galera-0\" (UID: \"f8511a9d-0c08-43c9-9243-f340d75fabe1\") " pod="openstack/openstack-galera-0" Feb 16 11:28:12 crc kubenswrapper[4949]: I0216 11:28:12.042849 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Feb 16 11:28:12 crc kubenswrapper[4949]: I0216 11:28:12.066323 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-94a217f4-793a-4771-95e1-fc3728151462\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-94a217f4-793a-4771-95e1-fc3728151462\") pod \"openstack-galera-0\" (UID: \"f8511a9d-0c08-43c9-9243-f340d75fabe1\") " pod="openstack/openstack-galera-0" Feb 16 11:28:12 crc kubenswrapper[4949]: I0216 11:28:12.272494 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Feb 16 11:28:12 crc kubenswrapper[4949]: I0216 11:28:12.411597 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"2fd90353-44d0-4269-84cc-f90c10eb6da4","Type":"ContainerStarted","Data":"44dab8a1e0b6b38472154706d763c323ef398137fa0702f28119f2592037c06a"} Feb 16 11:28:12 crc kubenswrapper[4949]: I0216 11:28:12.418476 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"2b4e8478-eec0-499f-a824-b0f07355e4f6","Type":"ContainerStarted","Data":"e5b8549583bda9f42a5052692f24aa6850f0788c2b274beb42bd5b481e323e2b"} Feb 16 11:28:12 crc kubenswrapper[4949]: I0216 11:28:12.421565 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"0f715146-edc4-4f1f-80e3-f134d9833f47","Type":"ContainerStarted","Data":"fc9e07b8da78c26b558ce3d57c7ccaf3be86c18bddf714d178c8caaeaae7c828"} Feb 16 11:28:12 crc kubenswrapper[4949]: I0216 11:28:12.425353 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a","Type":"ContainerStarted","Data":"2588f03babc9679c43ce03988cd721bb82dc36c86eeb9933b8dfcd938b9b19ed"} Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.120244 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.174202 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.177230 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.180135 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.180507 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-ww8wd" Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.180886 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.181932 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.194876 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.212542 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/9a5b2f17-57bf-4aad-b18c-d1ec47f358c9-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"9a5b2f17-57bf-4aad-b18c-d1ec47f358c9\") " pod="openstack/openstack-cell1-galera-0" Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.212725 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/9a5b2f17-57bf-4aad-b18c-d1ec47f358c9-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"9a5b2f17-57bf-4aad-b18c-d1ec47f358c9\") " pod="openstack/openstack-cell1-galera-0" Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.212878 4949 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/9a5b2f17-57bf-4aad-b18c-d1ec47f358c9-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"9a5b2f17-57bf-4aad-b18c-d1ec47f358c9\") " pod="openstack/openstack-cell1-galera-0" Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.213002 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9a5b2f17-57bf-4aad-b18c-d1ec47f358c9-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"9a5b2f17-57bf-4aad-b18c-d1ec47f358c9\") " pod="openstack/openstack-cell1-galera-0" Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.213156 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/9a5b2f17-57bf-4aad-b18c-d1ec47f358c9-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"9a5b2f17-57bf-4aad-b18c-d1ec47f358c9\") " pod="openstack/openstack-cell1-galera-0" Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.213241 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hxmw9\" (UniqueName: \"kubernetes.io/projected/9a5b2f17-57bf-4aad-b18c-d1ec47f358c9-kube-api-access-hxmw9\") pod \"openstack-cell1-galera-0\" (UID: \"9a5b2f17-57bf-4aad-b18c-d1ec47f358c9\") " pod="openstack/openstack-cell1-galera-0" Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.213328 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9a5b2f17-57bf-4aad-b18c-d1ec47f358c9-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"9a5b2f17-57bf-4aad-b18c-d1ec47f358c9\") " pod="openstack/openstack-cell1-galera-0" Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.213405 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-2e78d12f-a480-42e2-9683-c4851443bb81\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-2e78d12f-a480-42e2-9683-c4851443bb81\") pod \"openstack-cell1-galera-0\" (UID: \"9a5b2f17-57bf-4aad-b18c-d1ec47f358c9\") " pod="openstack/openstack-cell1-galera-0" Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.321962 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-2e78d12f-a480-42e2-9683-c4851443bb81\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-2e78d12f-a480-42e2-9683-c4851443bb81\") pod \"openstack-cell1-galera-0\" (UID: \"9a5b2f17-57bf-4aad-b18c-d1ec47f358c9\") " pod="openstack/openstack-cell1-galera-0" Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.322120 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/9a5b2f17-57bf-4aad-b18c-d1ec47f358c9-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"9a5b2f17-57bf-4aad-b18c-d1ec47f358c9\") " pod="openstack/openstack-cell1-galera-0" Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.322153 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/9a5b2f17-57bf-4aad-b18c-d1ec47f358c9-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"9a5b2f17-57bf-4aad-b18c-d1ec47f358c9\") " 
pod="openstack/openstack-cell1-galera-0" Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.322208 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/9a5b2f17-57bf-4aad-b18c-d1ec47f358c9-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"9a5b2f17-57bf-4aad-b18c-d1ec47f358c9\") " pod="openstack/openstack-cell1-galera-0" Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.323692 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/9a5b2f17-57bf-4aad-b18c-d1ec47f358c9-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"9a5b2f17-57bf-4aad-b18c-d1ec47f358c9\") " pod="openstack/openstack-cell1-galera-0" Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.324443 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/9a5b2f17-57bf-4aad-b18c-d1ec47f358c9-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"9a5b2f17-57bf-4aad-b18c-d1ec47f358c9\") " pod="openstack/openstack-cell1-galera-0" Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.324556 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9a5b2f17-57bf-4aad-b18c-d1ec47f358c9-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"9a5b2f17-57bf-4aad-b18c-d1ec47f358c9\") " pod="openstack/openstack-cell1-galera-0" Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.324725 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/9a5b2f17-57bf-4aad-b18c-d1ec47f358c9-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"9a5b2f17-57bf-4aad-b18c-d1ec47f358c9\") " pod="openstack/openstack-cell1-galera-0" Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.324730 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/9a5b2f17-57bf-4aad-b18c-d1ec47f358c9-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"9a5b2f17-57bf-4aad-b18c-d1ec47f358c9\") " pod="openstack/openstack-cell1-galera-0" Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.324786 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hxmw9\" (UniqueName: \"kubernetes.io/projected/9a5b2f17-57bf-4aad-b18c-d1ec47f358c9-kube-api-access-hxmw9\") pod \"openstack-cell1-galera-0\" (UID: \"9a5b2f17-57bf-4aad-b18c-d1ec47f358c9\") " pod="openstack/openstack-cell1-galera-0" Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.324951 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9a5b2f17-57bf-4aad-b18c-d1ec47f358c9-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"9a5b2f17-57bf-4aad-b18c-d1ec47f358c9\") " pod="openstack/openstack-cell1-galera-0" Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.332081 4949 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.332133 4949 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-2e78d12f-a480-42e2-9683-c4851443bb81\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-2e78d12f-a480-42e2-9683-c4851443bb81\") pod \"openstack-cell1-galera-0\" (UID: \"9a5b2f17-57bf-4aad-b18c-d1ec47f358c9\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/309a545f40e096d4dd611a6a6bcd4eca0610718d6734dc9e245cc312881e93f3/globalmount\"" pod="openstack/openstack-cell1-galera-0" Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.333671 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9a5b2f17-57bf-4aad-b18c-d1ec47f358c9-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"9a5b2f17-57bf-4aad-b18c-d1ec47f358c9\") " pod="openstack/openstack-cell1-galera-0" Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.334916 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/9a5b2f17-57bf-4aad-b18c-d1ec47f358c9-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"9a5b2f17-57bf-4aad-b18c-d1ec47f358c9\") " pod="openstack/openstack-cell1-galera-0" Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.337928 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9a5b2f17-57bf-4aad-b18c-d1ec47f358c9-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"9a5b2f17-57bf-4aad-b18c-d1ec47f358c9\") " pod="openstack/openstack-cell1-galera-0" Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.344848 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hxmw9\" (UniqueName: \"kubernetes.io/projected/9a5b2f17-57bf-4aad-b18c-d1ec47f358c9-kube-api-access-hxmw9\") pod \"openstack-cell1-galera-0\" (UID: \"9a5b2f17-57bf-4aad-b18c-d1ec47f358c9\") " pod="openstack/openstack-cell1-galera-0" Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.393911 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-2e78d12f-a480-42e2-9683-c4851443bb81\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-2e78d12f-a480-42e2-9683-c4851443bb81\") pod \"openstack-cell1-galera-0\" (UID: \"9a5b2f17-57bf-4aad-b18c-d1ec47f358c9\") " pod="openstack/openstack-cell1-galera-0" Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.498666 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.618355 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.625061 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.629098 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-xztvb" Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.629724 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.630102 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.641017 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/03acf817-d9f3-4c65-b4c0-920136bc3d7b-config-data\") pod \"memcached-0\" (UID: \"03acf817-d9f3-4c65-b4c0-920136bc3d7b\") " pod="openstack/memcached-0" Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.641215 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/03acf817-d9f3-4c65-b4c0-920136bc3d7b-kolla-config\") pod \"memcached-0\" (UID: \"03acf817-d9f3-4c65-b4c0-920136bc3d7b\") " pod="openstack/memcached-0" Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.641304 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4bz6t\" (UniqueName: \"kubernetes.io/projected/03acf817-d9f3-4c65-b4c0-920136bc3d7b-kube-api-access-4bz6t\") pod \"memcached-0\" (UID: \"03acf817-d9f3-4c65-b4c0-920136bc3d7b\") " pod="openstack/memcached-0" Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.641330 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03acf817-d9f3-4c65-b4c0-920136bc3d7b-combined-ca-bundle\") pod \"memcached-0\" (UID: \"03acf817-d9f3-4c65-b4c0-920136bc3d7b\") " pod="openstack/memcached-0" Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.641390 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/03acf817-d9f3-4c65-b4c0-920136bc3d7b-memcached-tls-certs\") pod \"memcached-0\" (UID: \"03acf817-d9f3-4c65-b4c0-920136bc3d7b\") " pod="openstack/memcached-0" Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.670140 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.744209 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/03acf817-d9f3-4c65-b4c0-920136bc3d7b-kolla-config\") pod \"memcached-0\" (UID: \"03acf817-d9f3-4c65-b4c0-920136bc3d7b\") " pod="openstack/memcached-0" Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.744343 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4bz6t\" (UniqueName: \"kubernetes.io/projected/03acf817-d9f3-4c65-b4c0-920136bc3d7b-kube-api-access-4bz6t\") pod \"memcached-0\" (UID: \"03acf817-d9f3-4c65-b4c0-920136bc3d7b\") " pod="openstack/memcached-0" Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.744376 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/03acf817-d9f3-4c65-b4c0-920136bc3d7b-combined-ca-bundle\") pod \"memcached-0\" (UID: \"03acf817-d9f3-4c65-b4c0-920136bc3d7b\") " pod="openstack/memcached-0" Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.744493 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/03acf817-d9f3-4c65-b4c0-920136bc3d7b-memcached-tls-certs\") pod \"memcached-0\" (UID: \"03acf817-d9f3-4c65-b4c0-920136bc3d7b\") " pod="openstack/memcached-0" Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.744594 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/03acf817-d9f3-4c65-b4c0-920136bc3d7b-config-data\") pod \"memcached-0\" (UID: \"03acf817-d9f3-4c65-b4c0-920136bc3d7b\") " pod="openstack/memcached-0" Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.745714 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/03acf817-d9f3-4c65-b4c0-920136bc3d7b-kolla-config\") pod \"memcached-0\" (UID: \"03acf817-d9f3-4c65-b4c0-920136bc3d7b\") " pod="openstack/memcached-0" Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.751526 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/03acf817-d9f3-4c65-b4c0-920136bc3d7b-config-data\") pod \"memcached-0\" (UID: \"03acf817-d9f3-4c65-b4c0-920136bc3d7b\") " pod="openstack/memcached-0" Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.754933 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/03acf817-d9f3-4c65-b4c0-920136bc3d7b-memcached-tls-certs\") pod \"memcached-0\" (UID: \"03acf817-d9f3-4c65-b4c0-920136bc3d7b\") " pod="openstack/memcached-0" Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.761838 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03acf817-d9f3-4c65-b4c0-920136bc3d7b-combined-ca-bundle\") pod \"memcached-0\" (UID: \"03acf817-d9f3-4c65-b4c0-920136bc3d7b\") " pod="openstack/memcached-0" Feb 16 11:28:13 crc kubenswrapper[4949]: I0216 11:28:13.799444 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4bz6t\" (UniqueName: \"kubernetes.io/projected/03acf817-d9f3-4c65-b4c0-920136bc3d7b-kube-api-access-4bz6t\") pod \"memcached-0\" (UID: \"03acf817-d9f3-4c65-b4c0-920136bc3d7b\") " pod="openstack/memcached-0" Feb 16 11:28:14 crc kubenswrapper[4949]: I0216 11:28:14.027399 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Feb 16 11:28:14 crc kubenswrapper[4949]: I0216 11:28:14.201096 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Feb 16 11:28:14 crc kubenswrapper[4949]: W0216 11:28:14.247969 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9a5b2f17_57bf_4aad_b18c_d1ec47f358c9.slice/crio-c770a5f9f3cc644ea2348cd6bffe90867414c0e61cbba9ba5dd8aa20b08fe6ab WatchSource:0}: Error finding container c770a5f9f3cc644ea2348cd6bffe90867414c0e61cbba9ba5dd8aa20b08fe6ab: Status 404 returned error can't find the container with id c770a5f9f3cc644ea2348cd6bffe90867414c0e61cbba9ba5dd8aa20b08fe6ab Feb 16 11:28:14 crc kubenswrapper[4949]: I0216 11:28:14.466469 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"f8511a9d-0c08-43c9-9243-f340d75fabe1","Type":"ContainerStarted","Data":"53a6d3c487e6b5cc83dd7db84b90eab07eefa5a95d3cfe28d02164b75cc65c0e"} Feb 16 11:28:14 crc kubenswrapper[4949]: I0216 11:28:14.471047 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"9a5b2f17-57bf-4aad-b18c-d1ec47f358c9","Type":"ContainerStarted","Data":"c770a5f9f3cc644ea2348cd6bffe90867414c0e61cbba9ba5dd8aa20b08fe6ab"} Feb 16 11:28:14 crc kubenswrapper[4949]: I0216 11:28:14.750476 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Feb 16 11:28:15 crc kubenswrapper[4949]: I0216 11:28:15.557222 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"03acf817-d9f3-4c65-b4c0-920136bc3d7b","Type":"ContainerStarted","Data":"69435a2a784aec40584d9e9fc2938ac15f0987dcf34502fecde10b71c4105211"} Feb 16 11:28:16 crc kubenswrapper[4949]: I0216 11:28:16.176272 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Feb 16 11:28:16 crc kubenswrapper[4949]: I0216 11:28:16.178804 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Feb 16 11:28:16 crc kubenswrapper[4949]: I0216 11:28:16.183900 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-98mm4" Feb 16 11:28:16 crc kubenswrapper[4949]: I0216 11:28:16.220962 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Feb 16 11:28:16 crc kubenswrapper[4949]: I0216 11:28:16.277819 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tm986\" (UniqueName: \"kubernetes.io/projected/f89f3c3c-f6cb-4d3e-8950-fe35b2bfcc41-kube-api-access-tm986\") pod \"kube-state-metrics-0\" (UID: \"f89f3c3c-f6cb-4d3e-8950-fe35b2bfcc41\") " pod="openstack/kube-state-metrics-0" Feb 16 11:28:16 crc kubenswrapper[4949]: I0216 11:28:16.380013 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tm986\" (UniqueName: \"kubernetes.io/projected/f89f3c3c-f6cb-4d3e-8950-fe35b2bfcc41-kube-api-access-tm986\") pod \"kube-state-metrics-0\" (UID: \"f89f3c3c-f6cb-4d3e-8950-fe35b2bfcc41\") " pod="openstack/kube-state-metrics-0" Feb 16 11:28:16 crc kubenswrapper[4949]: I0216 11:28:16.442917 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tm986\" (UniqueName: \"kubernetes.io/projected/f89f3c3c-f6cb-4d3e-8950-fe35b2bfcc41-kube-api-access-tm986\") pod \"kube-state-metrics-0\" (UID: \"f89f3c3c-f6cb-4d3e-8950-fe35b2bfcc41\") " pod="openstack/kube-state-metrics-0" Feb 16 11:28:16 crc kubenswrapper[4949]: I0216 11:28:16.529130 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Feb 16 11:28:16 crc kubenswrapper[4949]: I0216 11:28:16.930444 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-ui-dashboards-66cbf594b5-qzxn6"] Feb 16 11:28:16 crc kubenswrapper[4949]: I0216 11:28:16.932013 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/observability-ui-dashboards-66cbf594b5-qzxn6" Feb 16 11:28:16 crc kubenswrapper[4949]: I0216 11:28:16.946367 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-ui-dashboards-sa-dockercfg-bsqkx" Feb 16 11:28:16 crc kubenswrapper[4949]: I0216 11:28:16.946545 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-ui-dashboards" Feb 16 11:28:16 crc kubenswrapper[4949]: I0216 11:28:16.961062 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-ui-dashboards-66cbf594b5-qzxn6"] Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.002813 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f99782ba-1386-4f77-ba13-bb2fd7ab6935-serving-cert\") pod \"observability-ui-dashboards-66cbf594b5-qzxn6\" (UID: \"f99782ba-1386-4f77-ba13-bb2fd7ab6935\") " pod="openshift-operators/observability-ui-dashboards-66cbf594b5-qzxn6" Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.002971 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wm9hb\" (UniqueName: \"kubernetes.io/projected/f99782ba-1386-4f77-ba13-bb2fd7ab6935-kube-api-access-wm9hb\") pod \"observability-ui-dashboards-66cbf594b5-qzxn6\" (UID: \"f99782ba-1386-4f77-ba13-bb2fd7ab6935\") " pod="openshift-operators/observability-ui-dashboards-66cbf594b5-qzxn6" Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.107659 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wm9hb\" (UniqueName: \"kubernetes.io/projected/f99782ba-1386-4f77-ba13-bb2fd7ab6935-kube-api-access-wm9hb\") pod \"observability-ui-dashboards-66cbf594b5-qzxn6\" (UID: \"f99782ba-1386-4f77-ba13-bb2fd7ab6935\") " pod="openshift-operators/observability-ui-dashboards-66cbf594b5-qzxn6" Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.107767 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f99782ba-1386-4f77-ba13-bb2fd7ab6935-serving-cert\") pod \"observability-ui-dashboards-66cbf594b5-qzxn6\" (UID: \"f99782ba-1386-4f77-ba13-bb2fd7ab6935\") " pod="openshift-operators/observability-ui-dashboards-66cbf594b5-qzxn6" Feb 16 11:28:17 crc kubenswrapper[4949]: E0216 11:28:17.107917 4949 secret.go:188] Couldn't get secret openshift-operators/observability-ui-dashboards: secret "observability-ui-dashboards" not found Feb 16 11:28:17 crc kubenswrapper[4949]: E0216 11:28:17.107975 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f99782ba-1386-4f77-ba13-bb2fd7ab6935-serving-cert podName:f99782ba-1386-4f77-ba13-bb2fd7ab6935 nodeName:}" failed. No retries permitted until 2026-02-16 11:28:17.607957839 +0000 UTC m=+1287.237292004 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/f99782ba-1386-4f77-ba13-bb2fd7ab6935-serving-cert") pod "observability-ui-dashboards-66cbf594b5-qzxn6" (UID: "f99782ba-1386-4f77-ba13-bb2fd7ab6935") : secret "observability-ui-dashboards" not found
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.186540 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wm9hb\" (UniqueName: \"kubernetes.io/projected/f99782ba-1386-4f77-ba13-bb2fd7ab6935-kube-api-access-wm9hb\") pod \"observability-ui-dashboards-66cbf594b5-qzxn6\" (UID: \"f99782ba-1386-4f77-ba13-bb2fd7ab6935\") " pod="openshift-operators/observability-ui-dashboards-66cbf594b5-qzxn6"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.451922 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-6494cb78b9-hnbth"]
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.453602 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-6494cb78b9-hnbth"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.469690 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"]
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.485037 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.496953 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-6494cb78b9-hnbth"]
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.507515 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.507715 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-1"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.508099 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.512016 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-2"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.512960 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.513490 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.513631 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-s6fl4"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.521974 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.526523 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.551855 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/9dd62db2-4af9-482c-b9ad-34021e59dae8-config\") pod \"prometheus-metric-storage-0\" (UID: \"9dd62db2-4af9-482c-b9ad-34021e59dae8\") " pod="openstack/prometheus-metric-storage-0"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.551902 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/109c24ec-a2c4-46e6-873e-2af9e58440bc-console-oauth-config\") pod \"console-6494cb78b9-hnbth\" (UID: \"109c24ec-a2c4-46e6-873e-2af9e58440bc\") " pod="openshift-console/console-6494cb78b9-hnbth"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.551972 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/109c24ec-a2c4-46e6-873e-2af9e58440bc-oauth-serving-cert\") pod \"console-6494cb78b9-hnbth\" (UID: \"109c24ec-a2c4-46e6-873e-2af9e58440bc\") " pod="openshift-console/console-6494cb78b9-hnbth"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.552037 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/9dd62db2-4af9-482c-b9ad-34021e59dae8-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"9dd62db2-4af9-482c-b9ad-34021e59dae8\") " pod="openstack/prometheus-metric-storage-0"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.552069 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/9dd62db2-4af9-482c-b9ad-34021e59dae8-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"9dd62db2-4af9-482c-b9ad-34021e59dae8\") " pod="openstack/prometheus-metric-storage-0"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.552098 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x9vhk\" (UniqueName: \"kubernetes.io/projected/9dd62db2-4af9-482c-b9ad-34021e59dae8-kube-api-access-x9vhk\") pod \"prometheus-metric-storage-0\" (UID: \"9dd62db2-4af9-482c-b9ad-34021e59dae8\") " pod="openstack/prometheus-metric-storage-0"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.552129 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/109c24ec-a2c4-46e6-873e-2af9e58440bc-console-config\") pod \"console-6494cb78b9-hnbth\" (UID: \"109c24ec-a2c4-46e6-873e-2af9e58440bc\") " pod="openshift-console/console-6494cb78b9-hnbth"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.552154 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/109c24ec-a2c4-46e6-873e-2af9e58440bc-service-ca\") pod \"console-6494cb78b9-hnbth\" (UID: \"109c24ec-a2c4-46e6-873e-2af9e58440bc\") " pod="openshift-console/console-6494cb78b9-hnbth"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.552416 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/9dd62db2-4af9-482c-b9ad-34021e59dae8-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"9dd62db2-4af9-482c-b9ad-34021e59dae8\") " pod="openstack/prometheus-metric-storage-0"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.552495 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/9dd62db2-4af9-482c-b9ad-34021e59dae8-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"9dd62db2-4af9-482c-b9ad-34021e59dae8\") " pod="openstack/prometheus-metric-storage-0"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.553009 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/9dd62db2-4af9-482c-b9ad-34021e59dae8-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"9dd62db2-4af9-482c-b9ad-34021e59dae8\") " pod="openstack/prometheus-metric-storage-0"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.553115 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/9dd62db2-4af9-482c-b9ad-34021e59dae8-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"9dd62db2-4af9-482c-b9ad-34021e59dae8\") " pod="openstack/prometheus-metric-storage-0"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.553634 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/109c24ec-a2c4-46e6-873e-2af9e58440bc-trusted-ca-bundle\") pod \"console-6494cb78b9-hnbth\" (UID: \"109c24ec-a2c4-46e6-873e-2af9e58440bc\") " pod="openshift-console/console-6494cb78b9-hnbth"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.554016 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-da4ecb59-5c41-42e2-a659-66ef27f33092\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-da4ecb59-5c41-42e2-a659-66ef27f33092\") pod \"prometheus-metric-storage-0\" (UID: \"9dd62db2-4af9-482c-b9ad-34021e59dae8\") " pod="openstack/prometheus-metric-storage-0"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.554104 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/9dd62db2-4af9-482c-b9ad-34021e59dae8-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"9dd62db2-4af9-482c-b9ad-34021e59dae8\") " pod="openstack/prometheus-metric-storage-0"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.554198 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/109c24ec-a2c4-46e6-873e-2af9e58440bc-console-serving-cert\") pod \"console-6494cb78b9-hnbth\" (UID: \"109c24ec-a2c4-46e6-873e-2af9e58440bc\") " pod="openshift-console/console-6494cb78b9-hnbth"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.554357 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sqkvz\" (UniqueName: \"kubernetes.io/projected/109c24ec-a2c4-46e6-873e-2af9e58440bc-kube-api-access-sqkvz\") pod \"console-6494cb78b9-hnbth\" (UID: \"109c24ec-a2c4-46e6-873e-2af9e58440bc\") " pod="openshift-console/console-6494cb78b9-hnbth"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.655761 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sqkvz\" (UniqueName: \"kubernetes.io/projected/109c24ec-a2c4-46e6-873e-2af9e58440bc-kube-api-access-sqkvz\") pod \"console-6494cb78b9-hnbth\" (UID: \"109c24ec-a2c4-46e6-873e-2af9e58440bc\") " pod="openshift-console/console-6494cb78b9-hnbth"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.656060 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/9dd62db2-4af9-482c-b9ad-34021e59dae8-config\") pod \"prometheus-metric-storage-0\" (UID: \"9dd62db2-4af9-482c-b9ad-34021e59dae8\") " pod="openstack/prometheus-metric-storage-0"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.656134 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/109c24ec-a2c4-46e6-873e-2af9e58440bc-console-oauth-config\") pod \"console-6494cb78b9-hnbth\" (UID: \"109c24ec-a2c4-46e6-873e-2af9e58440bc\") " pod="openshift-console/console-6494cb78b9-hnbth"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.656241 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f99782ba-1386-4f77-ba13-bb2fd7ab6935-serving-cert\") pod \"observability-ui-dashboards-66cbf594b5-qzxn6\" (UID: \"f99782ba-1386-4f77-ba13-bb2fd7ab6935\") " pod="openshift-operators/observability-ui-dashboards-66cbf594b5-qzxn6"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.656332 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/109c24ec-a2c4-46e6-873e-2af9e58440bc-oauth-serving-cert\") pod \"console-6494cb78b9-hnbth\" (UID: \"109c24ec-a2c4-46e6-873e-2af9e58440bc\") " pod="openshift-console/console-6494cb78b9-hnbth"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.656436 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/9dd62db2-4af9-482c-b9ad-34021e59dae8-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"9dd62db2-4af9-482c-b9ad-34021e59dae8\") " pod="openstack/prometheus-metric-storage-0"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.656514 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/9dd62db2-4af9-482c-b9ad-34021e59dae8-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"9dd62db2-4af9-482c-b9ad-34021e59dae8\") " pod="openstack/prometheus-metric-storage-0"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.656589 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x9vhk\" (UniqueName: \"kubernetes.io/projected/9dd62db2-4af9-482c-b9ad-34021e59dae8-kube-api-access-x9vhk\") pod \"prometheus-metric-storage-0\" (UID: \"9dd62db2-4af9-482c-b9ad-34021e59dae8\") " pod="openstack/prometheus-metric-storage-0"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.656667 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/109c24ec-a2c4-46e6-873e-2af9e58440bc-console-config\") pod \"console-6494cb78b9-hnbth\" (UID: \"109c24ec-a2c4-46e6-873e-2af9e58440bc\") " pod="openshift-console/console-6494cb78b9-hnbth"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.656761 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/109c24ec-a2c4-46e6-873e-2af9e58440bc-service-ca\") pod \"console-6494cb78b9-hnbth\" (UID: \"109c24ec-a2c4-46e6-873e-2af9e58440bc\") " pod="openshift-console/console-6494cb78b9-hnbth"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.656840 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/9dd62db2-4af9-482c-b9ad-34021e59dae8-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"9dd62db2-4af9-482c-b9ad-34021e59dae8\") " pod="openstack/prometheus-metric-storage-0"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.656918 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/9dd62db2-4af9-482c-b9ad-34021e59dae8-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"9dd62db2-4af9-482c-b9ad-34021e59dae8\") " pod="openstack/prometheus-metric-storage-0"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.656996 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/9dd62db2-4af9-482c-b9ad-34021e59dae8-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"9dd62db2-4af9-482c-b9ad-34021e59dae8\") " pod="openstack/prometheus-metric-storage-0"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.657086 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/9dd62db2-4af9-482c-b9ad-34021e59dae8-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"9dd62db2-4af9-482c-b9ad-34021e59dae8\") " pod="openstack/prometheus-metric-storage-0"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.657165 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/109c24ec-a2c4-46e6-873e-2af9e58440bc-trusted-ca-bundle\") pod \"console-6494cb78b9-hnbth\" (UID: \"109c24ec-a2c4-46e6-873e-2af9e58440bc\") " pod="openshift-console/console-6494cb78b9-hnbth"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.657295 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-da4ecb59-5c41-42e2-a659-66ef27f33092\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-da4ecb59-5c41-42e2-a659-66ef27f33092\") pod \"prometheus-metric-storage-0\" (UID: \"9dd62db2-4af9-482c-b9ad-34021e59dae8\") " pod="openstack/prometheus-metric-storage-0"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.657378 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/9dd62db2-4af9-482c-b9ad-34021e59dae8-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"9dd62db2-4af9-482c-b9ad-34021e59dae8\") " pod="openstack/prometheus-metric-storage-0"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.657450 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/109c24ec-a2c4-46e6-873e-2af9e58440bc-console-serving-cert\") pod \"console-6494cb78b9-hnbth\" (UID: \"109c24ec-a2c4-46e6-873e-2af9e58440bc\") " pod="openshift-console/console-6494cb78b9-hnbth"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.658681 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/109c24ec-a2c4-46e6-873e-2af9e58440bc-oauth-serving-cert\") pod \"console-6494cb78b9-hnbth\" (UID: \"109c24ec-a2c4-46e6-873e-2af9e58440bc\") " pod="openshift-console/console-6494cb78b9-hnbth"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.670437 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/109c24ec-a2c4-46e6-873e-2af9e58440bc-console-oauth-config\") pod \"console-6494cb78b9-hnbth\" (UID: \"109c24ec-a2c4-46e6-873e-2af9e58440bc\") " pod="openshift-console/console-6494cb78b9-hnbth"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.676580 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/109c24ec-a2c4-46e6-873e-2af9e58440bc-trusted-ca-bundle\") pod \"console-6494cb78b9-hnbth\" (UID: \"109c24ec-a2c4-46e6-873e-2af9e58440bc\") " pod="openshift-console/console-6494cb78b9-hnbth"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.685435 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/109c24ec-a2c4-46e6-873e-2af9e58440bc-console-config\") pod \"console-6494cb78b9-hnbth\" (UID: \"109c24ec-a2c4-46e6-873e-2af9e58440bc\") " pod="openshift-console/console-6494cb78b9-hnbth"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.685586 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/109c24ec-a2c4-46e6-873e-2af9e58440bc-service-ca\") pod \"console-6494cb78b9-hnbth\" (UID: \"109c24ec-a2c4-46e6-873e-2af9e58440bc\") " pod="openshift-console/console-6494cb78b9-hnbth"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.689041 4949 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.689083 4949 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-da4ecb59-5c41-42e2-a659-66ef27f33092\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-da4ecb59-5c41-42e2-a659-66ef27f33092\") pod \"prometheus-metric-storage-0\" (UID: \"9dd62db2-4af9-482c-b9ad-34021e59dae8\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/46dd49080b3b962fa6659badf9039db434c1b5170bb8043ef62ce273d0522d39/globalmount\"" pod="openstack/prometheus-metric-storage-0"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.690548 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f99782ba-1386-4f77-ba13-bb2fd7ab6935-serving-cert\") pod \"observability-ui-dashboards-66cbf594b5-qzxn6\" (UID: \"f99782ba-1386-4f77-ba13-bb2fd7ab6935\") " pod="openshift-operators/observability-ui-dashboards-66cbf594b5-qzxn6"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.697164 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/9dd62db2-4af9-482c-b9ad-34021e59dae8-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"9dd62db2-4af9-482c-b9ad-34021e59dae8\") " pod="openstack/prometheus-metric-storage-0"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.698267 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/9dd62db2-4af9-482c-b9ad-34021e59dae8-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"9dd62db2-4af9-482c-b9ad-34021e59dae8\") " pod="openstack/prometheus-metric-storage-0"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.701471 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/9dd62db2-4af9-482c-b9ad-34021e59dae8-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"9dd62db2-4af9-482c-b9ad-34021e59dae8\") " pod="openstack/prometheus-metric-storage-0"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.701726 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sqkvz\" (UniqueName: \"kubernetes.io/projected/109c24ec-a2c4-46e6-873e-2af9e58440bc-kube-api-access-sqkvz\") pod \"console-6494cb78b9-hnbth\" (UID: \"109c24ec-a2c4-46e6-873e-2af9e58440bc\") " pod="openshift-console/console-6494cb78b9-hnbth"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.705032 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/109c24ec-a2c4-46e6-873e-2af9e58440bc-console-serving-cert\") pod \"console-6494cb78b9-hnbth\" (UID: \"109c24ec-a2c4-46e6-873e-2af9e58440bc\") " pod="openshift-console/console-6494cb78b9-hnbth"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.706144 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/9dd62db2-4af9-482c-b9ad-34021e59dae8-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"9dd62db2-4af9-482c-b9ad-34021e59dae8\") " pod="openstack/prometheus-metric-storage-0"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.706309 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/9dd62db2-4af9-482c-b9ad-34021e59dae8-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"9dd62db2-4af9-482c-b9ad-34021e59dae8\") " pod="openstack/prometheus-metric-storage-0"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.711591 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/9dd62db2-4af9-482c-b9ad-34021e59dae8-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"9dd62db2-4af9-482c-b9ad-34021e59dae8\") " pod="openstack/prometheus-metric-storage-0"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.739737 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/9dd62db2-4af9-482c-b9ad-34021e59dae8-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"9dd62db2-4af9-482c-b9ad-34021e59dae8\") " pod="openstack/prometheus-metric-storage-0"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.744583 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x9vhk\" (UniqueName: \"kubernetes.io/projected/9dd62db2-4af9-482c-b9ad-34021e59dae8-kube-api-access-x9vhk\") pod \"prometheus-metric-storage-0\" (UID: \"9dd62db2-4af9-482c-b9ad-34021e59dae8\") " pod="openstack/prometheus-metric-storage-0"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.757195 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/9dd62db2-4af9-482c-b9ad-34021e59dae8-config\") pod \"prometheus-metric-storage-0\" (UID: \"9dd62db2-4af9-482c-b9ad-34021e59dae8\") " pod="openstack/prometheus-metric-storage-0"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.771880 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-da4ecb59-5c41-42e2-a659-66ef27f33092\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-da4ecb59-5c41-42e2-a659-66ef27f33092\") pod \"prometheus-metric-storage-0\" (UID: \"9dd62db2-4af9-482c-b9ad-34021e59dae8\") " pod="openstack/prometheus-metric-storage-0"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.781043 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-6494cb78b9-hnbth"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.832866 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.845663 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"]
Feb 16 11:28:17 crc kubenswrapper[4949]: I0216 11:28:17.875999 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-ui-dashboards-66cbf594b5-qzxn6"
Feb 16 11:28:18 crc kubenswrapper[4949]: W0216 11:28:18.203837 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf89f3c3c_f6cb_4d3e_8950_fe35b2bfcc41.slice/crio-e1229e8f2bad278f06824aa65d08667f08e0fa7cb82fc7f8fbc50843c993cae4 WatchSource:0}: Error finding container e1229e8f2bad278f06824aa65d08667f08e0fa7cb82fc7f8fbc50843c993cae4: Status 404 returned error can't find the container with id e1229e8f2bad278f06824aa65d08667f08e0fa7cb82fc7f8fbc50843c993cae4
Feb 16 11:28:18 crc kubenswrapper[4949]: I0216 11:28:18.877866 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-clw7g"]
Feb 16 11:28:18 crc kubenswrapper[4949]: I0216 11:28:18.892276 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-clw7g"
Feb 16 11:28:18 crc kubenswrapper[4949]: I0216 11:28:18.911761 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs"
Feb 16 11:28:18 crc kubenswrapper[4949]: I0216 11:28:18.912101 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-pqz4d"
Feb 16 11:28:18 crc kubenswrapper[4949]: I0216 11:28:18.912609 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts"
Feb 16 11:28:18 crc kubenswrapper[4949]: I0216 11:28:18.933029 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-clw7g"]
Feb 16 11:28:18 crc kubenswrapper[4949]: I0216 11:28:18.965592 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-fbgr9"]
Feb 16 11:28:18 crc kubenswrapper[4949]: I0216 11:28:18.975887 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-fbgr9"
Feb 16 11:28:18 crc kubenswrapper[4949]: I0216 11:28:18.978258 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-fbgr9"]
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.034720 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/dea77106-f4b1-4515-80bb-ebad1a6effcf-var-run\") pod \"ovn-controller-clw7g\" (UID: \"dea77106-f4b1-4515-80bb-ebad1a6effcf\") " pod="openstack/ovn-controller-clw7g"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.035024 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dea77106-f4b1-4515-80bb-ebad1a6effcf-combined-ca-bundle\") pod \"ovn-controller-clw7g\" (UID: \"dea77106-f4b1-4515-80bb-ebad1a6effcf\") " pod="openstack/ovn-controller-clw7g"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.035074 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xqc5v\" (UniqueName: \"kubernetes.io/projected/dea77106-f4b1-4515-80bb-ebad1a6effcf-kube-api-access-xqc5v\") pod \"ovn-controller-clw7g\" (UID: \"dea77106-f4b1-4515-80bb-ebad1a6effcf\") " pod="openstack/ovn-controller-clw7g"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.035117 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/dea77106-f4b1-4515-80bb-ebad1a6effcf-ovn-controller-tls-certs\") pod \"ovn-controller-clw7g\" (UID: \"dea77106-f4b1-4515-80bb-ebad1a6effcf\") " pod="openstack/ovn-controller-clw7g"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.035143 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/dea77106-f4b1-4515-80bb-ebad1a6effcf-var-run-ovn\") pod \"ovn-controller-clw7g\" (UID: \"dea77106-f4b1-4515-80bb-ebad1a6effcf\") " pod="openstack/ovn-controller-clw7g"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.040560 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/dea77106-f4b1-4515-80bb-ebad1a6effcf-var-log-ovn\") pod \"ovn-controller-clw7g\" (UID: \"dea77106-f4b1-4515-80bb-ebad1a6effcf\") " pod="openstack/ovn-controller-clw7g"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.040781 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dea77106-f4b1-4515-80bb-ebad1a6effcf-scripts\") pod \"ovn-controller-clw7g\" (UID: \"dea77106-f4b1-4515-80bb-ebad1a6effcf\") " pod="openstack/ovn-controller-clw7g"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.091583 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"f89f3c3c-f6cb-4d3e-8950-fe35b2bfcc41","Type":"ContainerStarted","Data":"e1229e8f2bad278f06824aa65d08667f08e0fa7cb82fc7f8fbc50843c993cae4"}
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.150888 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/dea77106-f4b1-4515-80bb-ebad1a6effcf-ovn-controller-tls-certs\") pod \"ovn-controller-clw7g\" (UID: \"dea77106-f4b1-4515-80bb-ebad1a6effcf\") " pod="openstack/ovn-controller-clw7g"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.151003 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/dea77106-f4b1-4515-80bb-ebad1a6effcf-var-run-ovn\") pod \"ovn-controller-clw7g\" (UID: \"dea77106-f4b1-4515-80bb-ebad1a6effcf\") " pod="openstack/ovn-controller-clw7g"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.151031 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/dea77106-f4b1-4515-80bb-ebad1a6effcf-var-log-ovn\") pod \"ovn-controller-clw7g\" (UID: \"dea77106-f4b1-4515-80bb-ebad1a6effcf\") " pod="openstack/ovn-controller-clw7g"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.151087 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/9dc5fe0b-3ce0-4647-8c29-f17c359e9f4c-var-lib\") pod \"ovn-controller-ovs-fbgr9\" (UID: \"9dc5fe0b-3ce0-4647-8c29-f17c359e9f4c\") " pod="openstack/ovn-controller-ovs-fbgr9"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.151126 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dea77106-f4b1-4515-80bb-ebad1a6effcf-scripts\") pod \"ovn-controller-clw7g\" (UID: \"dea77106-f4b1-4515-80bb-ebad1a6effcf\") " pod="openstack/ovn-controller-clw7g"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.151209 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/9dc5fe0b-3ce0-4647-8c29-f17c359e9f4c-var-run\") pod \"ovn-controller-ovs-fbgr9\" (UID: \"9dc5fe0b-3ce0-4647-8c29-f17c359e9f4c\") " pod="openstack/ovn-controller-ovs-fbgr9"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.151259 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/9dc5fe0b-3ce0-4647-8c29-f17c359e9f4c-etc-ovs\") pod \"ovn-controller-ovs-fbgr9\" (UID: \"9dc5fe0b-3ce0-4647-8c29-f17c359e9f4c\") " pod="openstack/ovn-controller-ovs-fbgr9"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.151281 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b6kjv\" (UniqueName: \"kubernetes.io/projected/9dc5fe0b-3ce0-4647-8c29-f17c359e9f4c-kube-api-access-b6kjv\") pod \"ovn-controller-ovs-fbgr9\" (UID: \"9dc5fe0b-3ce0-4647-8c29-f17c359e9f4c\") " pod="openstack/ovn-controller-ovs-fbgr9"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.151368 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/9dc5fe0b-3ce0-4647-8c29-f17c359e9f4c-var-log\") pod \"ovn-controller-ovs-fbgr9\" (UID: \"9dc5fe0b-3ce0-4647-8c29-f17c359e9f4c\") " pod="openstack/ovn-controller-ovs-fbgr9"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.151435 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/dea77106-f4b1-4515-80bb-ebad1a6effcf-var-run\") pod \"ovn-controller-clw7g\" (UID: \"dea77106-f4b1-4515-80bb-ebad1a6effcf\") " pod="openstack/ovn-controller-clw7g"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.151543 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9dc5fe0b-3ce0-4647-8c29-f17c359e9f4c-scripts\") pod \"ovn-controller-ovs-fbgr9\" (UID: \"9dc5fe0b-3ce0-4647-8c29-f17c359e9f4c\") " pod="openstack/ovn-controller-ovs-fbgr9"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.151640 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dea77106-f4b1-4515-80bb-ebad1a6effcf-combined-ca-bundle\") pod \"ovn-controller-clw7g\" (UID: \"dea77106-f4b1-4515-80bb-ebad1a6effcf\") " pod="openstack/ovn-controller-clw7g"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.151677 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xqc5v\" (UniqueName: \"kubernetes.io/projected/dea77106-f4b1-4515-80bb-ebad1a6effcf-kube-api-access-xqc5v\") pod \"ovn-controller-clw7g\" (UID: \"dea77106-f4b1-4515-80bb-ebad1a6effcf\") " pod="openstack/ovn-controller-clw7g"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.156766 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/dea77106-f4b1-4515-80bb-ebad1a6effcf-var-run\") pod \"ovn-controller-clw7g\" (UID: \"dea77106-f4b1-4515-80bb-ebad1a6effcf\") " pod="openstack/ovn-controller-clw7g"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.156904 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/dea77106-f4b1-4515-80bb-ebad1a6effcf-var-run-ovn\") pod \"ovn-controller-clw7g\" (UID: \"dea77106-f4b1-4515-80bb-ebad1a6effcf\") " pod="openstack/ovn-controller-clw7g"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.157659 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/dea77106-f4b1-4515-80bb-ebad1a6effcf-var-log-ovn\") pod \"ovn-controller-clw7g\" (UID: \"dea77106-f4b1-4515-80bb-ebad1a6effcf\") " pod="openstack/ovn-controller-clw7g"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.161460 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dea77106-f4b1-4515-80bb-ebad1a6effcf-scripts\") pod \"ovn-controller-clw7g\" (UID: \"dea77106-f4b1-4515-80bb-ebad1a6effcf\") " pod="openstack/ovn-controller-clw7g"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.170905 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dea77106-f4b1-4515-80bb-ebad1a6effcf-combined-ca-bundle\") pod \"ovn-controller-clw7g\" (UID: \"dea77106-f4b1-4515-80bb-ebad1a6effcf\") " pod="openstack/ovn-controller-clw7g"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.178877 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/dea77106-f4b1-4515-80bb-ebad1a6effcf-ovn-controller-tls-certs\") pod \"ovn-controller-clw7g\" (UID: \"dea77106-f4b1-4515-80bb-ebad1a6effcf\") " pod="openstack/ovn-controller-clw7g"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.179360 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xqc5v\" (UniqueName: \"kubernetes.io/projected/dea77106-f4b1-4515-80bb-ebad1a6effcf-kube-api-access-xqc5v\") pod \"ovn-controller-clw7g\" (UID: \"dea77106-f4b1-4515-80bb-ebad1a6effcf\") " pod="openstack/ovn-controller-clw7g"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.254843 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/9dc5fe0b-3ce0-4647-8c29-f17c359e9f4c-var-lib\") pod \"ovn-controller-ovs-fbgr9\" (UID: \"9dc5fe0b-3ce0-4647-8c29-f17c359e9f4c\") " pod="openstack/ovn-controller-ovs-fbgr9"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.254938 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/9dc5fe0b-3ce0-4647-8c29-f17c359e9f4c-var-run\") pod \"ovn-controller-ovs-fbgr9\" (UID: \"9dc5fe0b-3ce0-4647-8c29-f17c359e9f4c\") " pod="openstack/ovn-controller-ovs-fbgr9"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.254984 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/9dc5fe0b-3ce0-4647-8c29-f17c359e9f4c-etc-ovs\") pod \"ovn-controller-ovs-fbgr9\" (UID: \"9dc5fe0b-3ce0-4647-8c29-f17c359e9f4c\") " pod="openstack/ovn-controller-ovs-fbgr9"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.255006 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b6kjv\" (UniqueName: \"kubernetes.io/projected/9dc5fe0b-3ce0-4647-8c29-f17c359e9f4c-kube-api-access-b6kjv\") pod \"ovn-controller-ovs-fbgr9\" (UID: \"9dc5fe0b-3ce0-4647-8c29-f17c359e9f4c\") " pod="openstack/ovn-controller-ovs-fbgr9"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.255080 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/9dc5fe0b-3ce0-4647-8c29-f17c359e9f4c-var-log\") pod \"ovn-controller-ovs-fbgr9\" (UID: \"9dc5fe0b-3ce0-4647-8c29-f17c359e9f4c\") " pod="openstack/ovn-controller-ovs-fbgr9"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.255218 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9dc5fe0b-3ce0-4647-8c29-f17c359e9f4c-scripts\") pod \"ovn-controller-ovs-fbgr9\" (UID: \"9dc5fe0b-3ce0-4647-8c29-f17c359e9f4c\") " pod="openstack/ovn-controller-ovs-fbgr9"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.256467 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/9dc5fe0b-3ce0-4647-8c29-f17c359e9f4c-etc-ovs\") pod \"ovn-controller-ovs-fbgr9\" (UID: \"9dc5fe0b-3ce0-4647-8c29-f17c359e9f4c\") " pod="openstack/ovn-controller-ovs-fbgr9"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.256652 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/9dc5fe0b-3ce0-4647-8c29-f17c359e9f4c-var-lib\") pod \"ovn-controller-ovs-fbgr9\" (UID: \"9dc5fe0b-3ce0-4647-8c29-f17c359e9f4c\") " pod="openstack/ovn-controller-ovs-fbgr9"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.256946 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/9dc5fe0b-3ce0-4647-8c29-f17c359e9f4c-var-run\") pod \"ovn-controller-ovs-fbgr9\" (UID: \"9dc5fe0b-3ce0-4647-8c29-f17c359e9f4c\") " pod="openstack/ovn-controller-ovs-fbgr9"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.257125 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/9dc5fe0b-3ce0-4647-8c29-f17c359e9f4c-var-log\") pod \"ovn-controller-ovs-fbgr9\" (UID: \"9dc5fe0b-3ce0-4647-8c29-f17c359e9f4c\") " pod="openstack/ovn-controller-ovs-fbgr9"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.258092 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9dc5fe0b-3ce0-4647-8c29-f17c359e9f4c-scripts\") pod \"ovn-controller-ovs-fbgr9\" (UID: \"9dc5fe0b-3ce0-4647-8c29-f17c359e9f4c\") " pod="openstack/ovn-controller-ovs-fbgr9"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.283332 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-clw7g"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.292842 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b6kjv\" (UniqueName: \"kubernetes.io/projected/9dc5fe0b-3ce0-4647-8c29-f17c359e9f4c-kube-api-access-b6kjv\") pod \"ovn-controller-ovs-fbgr9\" (UID: \"9dc5fe0b-3ce0-4647-8c29-f17c359e9f4c\") " pod="openstack/ovn-controller-ovs-fbgr9"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.339272 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-fbgr9"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.391837 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-6494cb78b9-hnbth"]
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.668258 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"]
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.670926 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.676157 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.676664 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.676923 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-8x2bc"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.677159 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.677306 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.707847 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"]
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.819326 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-ui-dashboards-66cbf594b5-qzxn6"]
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.874808 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/d9ba865a-34ea-4b4b-80a4-0d35a63dd064-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"d9ba865a-34ea-4b4b-80a4-0d35a63dd064\") " pod="openstack/ovsdbserver-nb-0"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.875852 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9ba865a-34ea-4b4b-80a4-0d35a63dd064-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"d9ba865a-34ea-4b4b-80a4-0d35a63dd064\") " pod="openstack/ovsdbserver-nb-0"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.876164 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-b8e94948-e078-4c07-a7db-82f132c3dfed\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b8e94948-e078-4c07-a7db-82f132c3dfed\") pod \"ovsdbserver-nb-0\" (UID: \"d9ba865a-34ea-4b4b-80a4-0d35a63dd064\") " pod="openstack/ovsdbserver-nb-0"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.876347 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d9ba865a-34ea-4b4b-80a4-0d35a63dd064-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"d9ba865a-34ea-4b4b-80a4-0d35a63dd064\") " pod="openstack/ovsdbserver-nb-0"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.876648 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d9ba865a-34ea-4b4b-80a4-0d35a63dd064-config\") pod \"ovsdbserver-nb-0\" (UID: \"d9ba865a-34ea-4b4b-80a4-0d35a63dd064\") " pod="openstack/ovsdbserver-nb-0"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.876773 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/d9ba865a-34ea-4b4b-80a4-0d35a63dd064-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"d9ba865a-34ea-4b4b-80a4-0d35a63dd064\") " pod="openstack/ovsdbserver-nb-0"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.876846 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.876994 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d9ba865a-34ea-4b4b-80a4-0d35a63dd064-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"d9ba865a-34ea-4b4b-80a4-0d35a63dd064\") " pod="openstack/ovsdbserver-nb-0"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.877416 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8z4pp\" (UniqueName: \"kubernetes.io/projected/d9ba865a-34ea-4b4b-80a4-0d35a63dd064-kube-api-access-8z4pp\") pod \"ovsdbserver-nb-0\" (UID: \"d9ba865a-34ea-4b4b-80a4-0d35a63dd064\") " pod="openstack/ovsdbserver-nb-0"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.980566 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/d9ba865a-34ea-4b4b-80a4-0d35a63dd064-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"d9ba865a-34ea-4b4b-80a4-0d35a63dd064\") " pod="openstack/ovsdbserver-nb-0"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.980615 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9ba865a-34ea-4b4b-80a4-0d35a63dd064-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"d9ba865a-34ea-4b4b-80a4-0d35a63dd064\") " pod="openstack/ovsdbserver-nb-0"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.980648 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-b8e94948-e078-4c07-a7db-82f132c3dfed\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b8e94948-e078-4c07-a7db-82f132c3dfed\") pod \"ovsdbserver-nb-0\" (UID: \"d9ba865a-34ea-4b4b-80a4-0d35a63dd064\") " pod="openstack/ovsdbserver-nb-0"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.980674 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d9ba865a-34ea-4b4b-80a4-0d35a63dd064-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"d9ba865a-34ea-4b4b-80a4-0d35a63dd064\") " pod="openstack/ovsdbserver-nb-0"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.980796 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d9ba865a-34ea-4b4b-80a4-0d35a63dd064-config\") pod \"ovsdbserver-nb-0\" (UID: \"d9ba865a-34ea-4b4b-80a4-0d35a63dd064\") " pod="openstack/ovsdbserver-nb-0"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.980821 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/d9ba865a-34ea-4b4b-80a4-0d35a63dd064-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"d9ba865a-34ea-4b4b-80a4-0d35a63dd064\") " pod="openstack/ovsdbserver-nb-0"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.980850 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d9ba865a-34ea-4b4b-80a4-0d35a63dd064-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"d9ba865a-34ea-4b4b-80a4-0d35a63dd064\") " pod="openstack/ovsdbserver-nb-0"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.980931 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8z4pp\" (UniqueName: \"kubernetes.io/projected/d9ba865a-34ea-4b4b-80a4-0d35a63dd064-kube-api-access-8z4pp\") pod \"ovsdbserver-nb-0\" (UID: \"d9ba865a-34ea-4b4b-80a4-0d35a63dd064\") " pod="openstack/ovsdbserver-nb-0"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.983528 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/d9ba865a-34ea-4b4b-80a4-0d35a63dd064-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"d9ba865a-34ea-4b4b-80a4-0d35a63dd064\") " pod="openstack/ovsdbserver-nb-0"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.983591 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d9ba865a-34ea-4b4b-80a4-0d35a63dd064-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"d9ba865a-34ea-4b4b-80a4-0d35a63dd064\") " pod="openstack/ovsdbserver-nb-0"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.984282 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d9ba865a-34ea-4b4b-80a4-0d35a63dd064-config\") pod \"ovsdbserver-nb-0\" (UID: \"d9ba865a-34ea-4b4b-80a4-0d35a63dd064\") " pod="openstack/ovsdbserver-nb-0"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.988709 4949 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.988757 4949 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-b8e94948-e078-4c07-a7db-82f132c3dfed\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b8e94948-e078-4c07-a7db-82f132c3dfed\") pod \"ovsdbserver-nb-0\" (UID: \"d9ba865a-34ea-4b4b-80a4-0d35a63dd064\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/e3d648e9564588049c3907c07ce621dbfa1ffd69efec33240e404d5eb0d8d92e/globalmount\"" pod="openstack/ovsdbserver-nb-0"
Feb 16 11:28:19 crc kubenswrapper[4949]: I0216 11:28:19.990320 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9ba865a-34ea-4b4b-80a4-0d35a63dd064-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"d9ba865a-34ea-4b4b-80a4-0d35a63dd064\") " pod="openstack/ovsdbserver-nb-0"
Feb 16 11:28:20 crc kubenswrapper[4949]: I0216 11:28:20.002519 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d9ba865a-34ea-4b4b-80a4-0d35a63dd064-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"d9ba865a-34ea-4b4b-80a4-0d35a63dd064\") " pod="openstack/ovsdbserver-nb-0"
Feb 16 11:28:20 crc kubenswrapper[4949]: I0216 11:28:20.007956 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8z4pp\" (UniqueName: \"kubernetes.io/projected/d9ba865a-34ea-4b4b-80a4-0d35a63dd064-kube-api-access-8z4pp\") pod \"ovsdbserver-nb-0\" (UID: \"d9ba865a-34ea-4b4b-80a4-0d35a63dd064\") " pod="openstack/ovsdbserver-nb-0"
Feb 16 11:28:20 crc kubenswrapper[4949]: I0216 11:28:20.009464 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/d9ba865a-34ea-4b4b-80a4-0d35a63dd064-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"d9ba865a-34ea-4b4b-80a4-0d35a63dd064\") " pod="openstack/ovsdbserver-nb-0"
Feb 16 11:28:20 crc kubenswrapper[4949]: I0216 11:28:20.090066 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-b8e94948-e078-4c07-a7db-82f132c3dfed\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b8e94948-e078-4c07-a7db-82f132c3dfed\") pod \"ovsdbserver-nb-0\" (UID: \"d9ba865a-34ea-4b4b-80a4-0d35a63dd064\") " pod="openstack/ovsdbserver-nb-0"
Feb 16 11:28:20 crc kubenswrapper[4949]: I0216 11:28:20.150997 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0"
Feb 16 11:28:20 crc kubenswrapper[4949]: I0216 11:28:20.193656 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"9dd62db2-4af9-482c-b9ad-34021e59dae8","Type":"ContainerStarted","Data":"d2117446267676f334f6a60075feb4ec2d444f85f1ee0290f5972a69b0282ebc"}
Feb 16 11:28:20 crc kubenswrapper[4949]: I0216 11:28:20.213357 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-ui-dashboards-66cbf594b5-qzxn6" event={"ID":"f99782ba-1386-4f77-ba13-bb2fd7ab6935","Type":"ContainerStarted","Data":"66d48665d347d61fa795f99c4ad8239c0747001d1478ccbc3817bf8fd1066978"}
Feb 16 11:28:20 crc kubenswrapper[4949]: I0216 11:28:20.238106 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-6494cb78b9-hnbth" event={"ID":"109c24ec-a2c4-46e6-873e-2af9e58440bc","Type":"ContainerStarted","Data":"ad41ed9c9285cf77ecedf327fec65e9c9a6f7203fd1fee9b61e7abfc0635c861"}
Feb 16 11:28:20 crc kubenswrapper[4949]: I0216 11:28:20.275087 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-6494cb78b9-hnbth" podStartSLOduration=3.275064488 podStartE2EDuration="3.275064488s" podCreationTimestamp="2026-02-16 11:28:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:28:20.270778166 +0000 UTC m=+1289.900112341" watchObservedRunningTime="2026-02-16 11:28:20.275064488 +0000 UTC m=+1289.904398653"
Feb 16 11:28:20 crc kubenswrapper[4949]: I0216 11:28:20.592537 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-clw7g"]
Feb 16 11:28:20 crc kubenswrapper[4949]: I0216 11:28:20.970377 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-fbgr9"]
Feb 16 11:28:21 crc kubenswrapper[4949]: I0216 11:28:21.292834 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"]
Feb 16 11:28:21 crc kubenswrapper[4949]: I0216 11:28:21.349178 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-6494cb78b9-hnbth" event={"ID":"109c24ec-a2c4-46e6-873e-2af9e58440bc","Type":"ContainerStarted","Data":"b1af3b95356a9a179924bb3a1c22d4252966d6c078e11634a9842e55ecad7830"}
Feb 16 11:28:21 crc kubenswrapper[4949]: I0216 11:28:21.354468 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-clw7g" event={"ID":"dea77106-f4b1-4515-80bb-ebad1a6effcf","Type":"ContainerStarted","Data":"b8ae83fa1a672c0eb8386516f913628e382e4c42a9256d2430f3908c76bf7ab5"}
Feb 16 11:28:21 crc kubenswrapper[4949]: I0216 11:28:21.596303 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-cswq4"]
Feb 16 11:28:21 crc kubenswrapper[4949]: I0216 11:28:21.599087 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-cswq4"
Feb 16 11:28:21 crc kubenswrapper[4949]: I0216 11:28:21.601920 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config"
Feb 16 11:28:21 crc kubenswrapper[4949]: I0216 11:28:21.607541 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-cswq4"]
Feb 16 11:28:21 crc kubenswrapper[4949]: I0216 11:28:21.726528 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/99904114-3c39-45d5-84b6-35b9543bdf3a-config\") pod \"ovn-controller-metrics-cswq4\" (UID: \"99904114-3c39-45d5-84b6-35b9543bdf3a\") " pod="openstack/ovn-controller-metrics-cswq4"
Feb 16 11:28:21 crc kubenswrapper[4949]: I0216 11:28:21.726617 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r5kwk\" (UniqueName: \"kubernetes.io/projected/99904114-3c39-45d5-84b6-35b9543bdf3a-kube-api-access-r5kwk\") pod \"ovn-controller-metrics-cswq4\" (UID: \"99904114-3c39-45d5-84b6-35b9543bdf3a\") " pod="openstack/ovn-controller-metrics-cswq4"
Feb 16 11:28:21 crc kubenswrapper[4949]: I0216 11:28:21.726825 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/99904114-3c39-45d5-84b6-35b9543bdf3a-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-cswq4\" (UID: \"99904114-3c39-45d5-84b6-35b9543bdf3a\") " pod="openstack/ovn-controller-metrics-cswq4"
Feb 16 11:28:21 crc kubenswrapper[4949]: I0216 11:28:21.726909 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99904114-3c39-45d5-84b6-35b9543bdf3a-combined-ca-bundle\") pod \"ovn-controller-metrics-cswq4\" (UID: \"99904114-3c39-45d5-84b6-35b9543bdf3a\") " pod="openstack/ovn-controller-metrics-cswq4"
Feb 16 11:28:21 crc kubenswrapper[4949]: I0216 11:28:21.750767 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/99904114-3c39-45d5-84b6-35b9543bdf3a-ovn-rundir\") pod \"ovn-controller-metrics-cswq4\" (UID: \"99904114-3c39-45d5-84b6-35b9543bdf3a\") " pod="openstack/ovn-controller-metrics-cswq4"
Feb 16 11:28:21 crc kubenswrapper[4949]: I0216 11:28:21.751226 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/99904114-3c39-45d5-84b6-35b9543bdf3a-ovs-rundir\") pod \"ovn-controller-metrics-cswq4\" (UID: \"99904114-3c39-45d5-84b6-35b9543bdf3a\") " pod="openstack/ovn-controller-metrics-cswq4"
Feb 16 11:28:21 crc kubenswrapper[4949]: I0216 11:28:21.853302 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/99904114-3c39-45d5-84b6-35b9543bdf3a-ovs-rundir\") pod \"ovn-controller-metrics-cswq4\" (UID: \"99904114-3c39-45d5-84b6-35b9543bdf3a\") " pod="openstack/ovn-controller-metrics-cswq4"
Feb 16 11:28:21 crc kubenswrapper[4949]: I0216 11:28:21.853389 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/99904114-3c39-45d5-84b6-35b9543bdf3a-config\") pod \"ovn-controller-metrics-cswq4\" (UID: \"99904114-3c39-45d5-84b6-35b9543bdf3a\") " pod="openstack/ovn-controller-metrics-cswq4"
Feb 16 11:28:21 crc kubenswrapper[4949]: I0216 11:28:21.853435 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r5kwk\" (UniqueName: \"kubernetes.io/projected/99904114-3c39-45d5-84b6-35b9543bdf3a-kube-api-access-r5kwk\") pod \"ovn-controller-metrics-cswq4\" (UID: \"99904114-3c39-45d5-84b6-35b9543bdf3a\") " pod="openstack/ovn-controller-metrics-cswq4"
Feb 16 11:28:21 crc kubenswrapper[4949]: I0216 11:28:21.853513 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/99904114-3c39-45d5-84b6-35b9543bdf3a-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-cswq4\" (UID: \"99904114-3c39-45d5-84b6-35b9543bdf3a\") " pod="openstack/ovn-controller-metrics-cswq4"
Feb 16 11:28:21 crc kubenswrapper[4949]: I0216 11:28:21.853547 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99904114-3c39-45d5-84b6-35b9543bdf3a-combined-ca-bundle\") pod \"ovn-controller-metrics-cswq4\" (UID: \"99904114-3c39-45d5-84b6-35b9543bdf3a\") " pod="openstack/ovn-controller-metrics-cswq4"
Feb 16 11:28:21 crc kubenswrapper[4949]: I0216 11:28:21.853567 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/99904114-3c39-45d5-84b6-35b9543bdf3a-ovn-rundir\") pod \"ovn-controller-metrics-cswq4\" (UID: \"99904114-3c39-45d5-84b6-35b9543bdf3a\") " pod="openstack/ovn-controller-metrics-cswq4"
Feb 16 11:28:21 crc kubenswrapper[4949]: I0216 11:28:21.853941 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/99904114-3c39-45d5-84b6-35b9543bdf3a-ovs-rundir\") pod \"ovn-controller-metrics-cswq4\" (UID: \"99904114-3c39-45d5-84b6-35b9543bdf3a\") " pod="openstack/ovn-controller-metrics-cswq4"
Feb 16 11:28:21 crc kubenswrapper[4949]: I0216 11:28:21.854289 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/99904114-3c39-45d5-84b6-35b9543bdf3a-ovn-rundir\") pod \"ovn-controller-metrics-cswq4\" (UID: \"99904114-3c39-45d5-84b6-35b9543bdf3a\") " pod="openstack/ovn-controller-metrics-cswq4"
Feb 16 11:28:21 crc kubenswrapper[4949]: I0216 11:28:21.856428 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/99904114-3c39-45d5-84b6-35b9543bdf3a-config\") pod \"ovn-controller-metrics-cswq4\" (UID: \"99904114-3c39-45d5-84b6-35b9543bdf3a\") " pod="openstack/ovn-controller-metrics-cswq4"
Feb 16 11:28:21 crc kubenswrapper[4949]: I0216 11:28:21.867748 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99904114-3c39-45d5-84b6-35b9543bdf3a-combined-ca-bundle\") pod \"ovn-controller-metrics-cswq4\" (UID: \"99904114-3c39-45d5-84b6-35b9543bdf3a\") " pod="openstack/ovn-controller-metrics-cswq4"
Feb 16 11:28:21 crc kubenswrapper[4949]: I0216 11:28:21.874912 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/99904114-3c39-45d5-84b6-35b9543bdf3a-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-cswq4\" (UID: \"99904114-3c39-45d5-84b6-35b9543bdf3a\") " pod="openstack/ovn-controller-metrics-cswq4"
Feb 16 11:28:21 crc kubenswrapper[4949]: I0216 11:28:21.890660 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r5kwk\" (UniqueName: \"kubernetes.io/projected/99904114-3c39-45d5-84b6-35b9543bdf3a-kube-api-access-r5kwk\") pod \"ovn-controller-metrics-cswq4\" (UID: \"99904114-3c39-45d5-84b6-35b9543bdf3a\") " pod="openstack/ovn-controller-metrics-cswq4"
Feb 16 11:28:21 crc kubenswrapper[4949]: I0216 11:28:21.971361 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-cswq4"
Feb 16 11:28:22 crc kubenswrapper[4949]: I0216 11:28:22.172951 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-qvg6k"]
Feb 16 11:28:22 crc kubenswrapper[4949]: I0216 11:28:22.203679 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-w9phd"]
Feb 16 11:28:22 crc kubenswrapper[4949]: I0216 11:28:22.228266 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-w9phd"
Feb 16 11:28:22 crc kubenswrapper[4949]: I0216 11:28:22.258150 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb"
Feb 16 11:28:22 crc kubenswrapper[4949]: I0216 11:28:22.272581 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-w9phd"]
Feb 16 11:28:22 crc kubenswrapper[4949]: I0216 11:28:22.382324 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4a2be70-f0e5-4f39-8b5e-744b868a47ad-config\") pod \"dnsmasq-dns-7fd796d7df-w9phd\" (UID: \"d4a2be70-f0e5-4f39-8b5e-744b868a47ad\") " pod="openstack/dnsmasq-dns-7fd796d7df-w9phd"
Feb 16 11:28:22 crc kubenswrapper[4949]: I0216 11:28:22.382386 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d4a2be70-f0e5-4f39-8b5e-744b868a47ad-ovsdbserver-nb\") pod \"dnsmasq-dns-7fd796d7df-w9phd\" (UID: \"d4a2be70-f0e5-4f39-8b5e-744b868a47ad\") " pod="openstack/dnsmasq-dns-7fd796d7df-w9phd"
Feb 16 11:28:22 crc kubenswrapper[4949]: I0216 11:28:22.382453 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-59f89\" (UniqueName: \"kubernetes.io/projected/d4a2be70-f0e5-4f39-8b5e-744b868a47ad-kube-api-access-59f89\") pod \"dnsmasq-dns-7fd796d7df-w9phd\" (UID: \"d4a2be70-f0e5-4f39-8b5e-744b868a47ad\") " pod="openstack/dnsmasq-dns-7fd796d7df-w9phd"
Feb 16 11:28:22 crc kubenswrapper[4949]: I0216 11:28:22.382528 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d4a2be70-f0e5-4f39-8b5e-744b868a47ad-dns-svc\") pod \"dnsmasq-dns-7fd796d7df-w9phd\" (UID: \"d4a2be70-f0e5-4f39-8b5e-744b868a47ad\") " pod="openstack/dnsmasq-dns-7fd796d7df-w9phd"
Feb 16 11:28:22 crc kubenswrapper[4949]: I0216 11:28:22.485768 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4a2be70-f0e5-4f39-8b5e-744b868a47ad-config\") pod \"dnsmasq-dns-7fd796d7df-w9phd\" (UID: \"d4a2be70-f0e5-4f39-8b5e-744b868a47ad\") " pod="openstack/dnsmasq-dns-7fd796d7df-w9phd"
Feb 16 11:28:22 crc kubenswrapper[4949]: I0216 11:28:22.485857 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d4a2be70-f0e5-4f39-8b5e-744b868a47ad-ovsdbserver-nb\") pod \"dnsmasq-dns-7fd796d7df-w9phd\" (UID: \"d4a2be70-f0e5-4f39-8b5e-744b868a47ad\") " pod="openstack/dnsmasq-dns-7fd796d7df-w9phd"
Feb 16 11:28:22 crc kubenswrapper[4949]: I0216 11:28:22.485934 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-59f89\" (UniqueName: \"kubernetes.io/projected/d4a2be70-f0e5-4f39-8b5e-744b868a47ad-kube-api-access-59f89\") pod \"dnsmasq-dns-7fd796d7df-w9phd\" (UID: \"d4a2be70-f0e5-4f39-8b5e-744b868a47ad\") " pod="openstack/dnsmasq-dns-7fd796d7df-w9phd"
Feb 16 11:28:22 crc kubenswrapper[4949]: I0216 11:28:22.486026 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d4a2be70-f0e5-4f39-8b5e-744b868a47ad-dns-svc\") pod \"dnsmasq-dns-7fd796d7df-w9phd\" (UID: \"d4a2be70-f0e5-4f39-8b5e-744b868a47ad\") " pod="openstack/dnsmasq-dns-7fd796d7df-w9phd"
Feb 16 11:28:22 crc kubenswrapper[4949]: I0216 11:28:22.488091 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4a2be70-f0e5-4f39-8b5e-744b868a47ad-config\") pod \"dnsmasq-dns-7fd796d7df-w9phd\" (UID: \"d4a2be70-f0e5-4f39-8b5e-744b868a47ad\") " pod="openstack/dnsmasq-dns-7fd796d7df-w9phd"
Feb 16 11:28:22 crc kubenswrapper[4949]: I0216 11:28:22.497466 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d4a2be70-f0e5-4f39-8b5e-744b868a47ad-dns-svc\") pod \"dnsmasq-dns-7fd796d7df-w9phd\" (UID: \"d4a2be70-f0e5-4f39-8b5e-744b868a47ad\") " pod="openstack/dnsmasq-dns-7fd796d7df-w9phd"
Feb 16 11:28:22 crc kubenswrapper[4949]: I0216 11:28:22.497706 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d4a2be70-f0e5-4f39-8b5e-744b868a47ad-ovsdbserver-nb\") pod \"dnsmasq-dns-7fd796d7df-w9phd\" (UID: \"d4a2be70-f0e5-4f39-8b5e-744b868a47ad\") " pod="openstack/dnsmasq-dns-7fd796d7df-w9phd"
Feb 16 11:28:22 crc kubenswrapper[4949]: I0216 11:28:22.511299 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-59f89\" (UniqueName: \"kubernetes.io/projected/d4a2be70-f0e5-4f39-8b5e-744b868a47ad-kube-api-access-59f89\") pod \"dnsmasq-dns-7fd796d7df-w9phd\" (UID: \"d4a2be70-f0e5-4f39-8b5e-744b868a47ad\") " pod="openstack/dnsmasq-dns-7fd796d7df-w9phd"
Feb 16 11:28:22 crc kubenswrapper[4949]: I0216 11:28:22.572704 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-w9phd"
Feb 16 11:28:22 crc kubenswrapper[4949]: I0216 11:28:22.758785 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"]
Feb 16 11:28:22 crc kubenswrapper[4949]: I0216 11:28:22.761339 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Feb 16 11:28:22 crc kubenswrapper[4949]: I0216 11:28:22.764388 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Feb 16 11:28:22 crc kubenswrapper[4949]: I0216 11:28:22.765271 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Feb 16 11:28:22 crc kubenswrapper[4949]: I0216 11:28:22.765795 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Feb 16 11:28:22 crc kubenswrapper[4949]: I0216 11:28:22.767227 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-ctt9f" Feb 16 11:28:22 crc kubenswrapper[4949]: I0216 11:28:22.794743 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Feb 16 11:28:22 crc kubenswrapper[4949]: I0216 11:28:22.897137 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/0c47e2a0-d831-46ee-a13a-93b4c487c4d9-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"0c47e2a0-d831-46ee-a13a-93b4c487c4d9\") " pod="openstack/ovsdbserver-sb-0" Feb 16 11:28:22 crc kubenswrapper[4949]: I0216 11:28:22.897579 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c47e2a0-d831-46ee-a13a-93b4c487c4d9-config\") pod \"ovsdbserver-sb-0\" (UID: \"0c47e2a0-d831-46ee-a13a-93b4c487c4d9\") " pod="openstack/ovsdbserver-sb-0" Feb 16 11:28:22 crc kubenswrapper[4949]: I0216 11:28:22.897610 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/0c47e2a0-d831-46ee-a13a-93b4c487c4d9-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"0c47e2a0-d831-46ee-a13a-93b4c487c4d9\") " pod="openstack/ovsdbserver-sb-0" Feb 16 11:28:22 crc kubenswrapper[4949]: I0216 11:28:22.897656 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-6a014519-a0b1-4533-90e8-0e5f8792071e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6a014519-a0b1-4533-90e8-0e5f8792071e\") pod \"ovsdbserver-sb-0\" (UID: \"0c47e2a0-d831-46ee-a13a-93b4c487c4d9\") " pod="openstack/ovsdbserver-sb-0" Feb 16 11:28:22 crc kubenswrapper[4949]: I0216 11:28:22.897696 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0c47e2a0-d831-46ee-a13a-93b4c487c4d9-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"0c47e2a0-d831-46ee-a13a-93b4c487c4d9\") " pod="openstack/ovsdbserver-sb-0" Feb 16 11:28:22 crc kubenswrapper[4949]: I0216 11:28:22.897721 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c47e2a0-d831-46ee-a13a-93b4c487c4d9-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"0c47e2a0-d831-46ee-a13a-93b4c487c4d9\") " pod="openstack/ovsdbserver-sb-0" Feb 16 11:28:22 crc kubenswrapper[4949]: I0216 11:28:22.897743 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sc7kf\" (UniqueName: \"kubernetes.io/projected/0c47e2a0-d831-46ee-a13a-93b4c487c4d9-kube-api-access-sc7kf\") pod 
\"ovsdbserver-sb-0\" (UID: \"0c47e2a0-d831-46ee-a13a-93b4c487c4d9\") " pod="openstack/ovsdbserver-sb-0" Feb 16 11:28:22 crc kubenswrapper[4949]: I0216 11:28:22.897800 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/0c47e2a0-d831-46ee-a13a-93b4c487c4d9-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"0c47e2a0-d831-46ee-a13a-93b4c487c4d9\") " pod="openstack/ovsdbserver-sb-0" Feb 16 11:28:23 crc kubenswrapper[4949]: I0216 11:28:22.999817 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/0c47e2a0-d831-46ee-a13a-93b4c487c4d9-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"0c47e2a0-d831-46ee-a13a-93b4c487c4d9\") " pod="openstack/ovsdbserver-sb-0" Feb 16 11:28:23 crc kubenswrapper[4949]: I0216 11:28:22.999880 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c47e2a0-d831-46ee-a13a-93b4c487c4d9-config\") pod \"ovsdbserver-sb-0\" (UID: \"0c47e2a0-d831-46ee-a13a-93b4c487c4d9\") " pod="openstack/ovsdbserver-sb-0" Feb 16 11:28:23 crc kubenswrapper[4949]: I0216 11:28:22.999904 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/0c47e2a0-d831-46ee-a13a-93b4c487c4d9-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"0c47e2a0-d831-46ee-a13a-93b4c487c4d9\") " pod="openstack/ovsdbserver-sb-0" Feb 16 11:28:23 crc kubenswrapper[4949]: I0216 11:28:22.999945 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-6a014519-a0b1-4533-90e8-0e5f8792071e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6a014519-a0b1-4533-90e8-0e5f8792071e\") pod \"ovsdbserver-sb-0\" (UID: \"0c47e2a0-d831-46ee-a13a-93b4c487c4d9\") " pod="openstack/ovsdbserver-sb-0" Feb 16 11:28:23 crc kubenswrapper[4949]: I0216 11:28:22.999967 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0c47e2a0-d831-46ee-a13a-93b4c487c4d9-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"0c47e2a0-d831-46ee-a13a-93b4c487c4d9\") " pod="openstack/ovsdbserver-sb-0" Feb 16 11:28:23 crc kubenswrapper[4949]: I0216 11:28:22.999989 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c47e2a0-d831-46ee-a13a-93b4c487c4d9-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"0c47e2a0-d831-46ee-a13a-93b4c487c4d9\") " pod="openstack/ovsdbserver-sb-0" Feb 16 11:28:23 crc kubenswrapper[4949]: I0216 11:28:23.000006 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sc7kf\" (UniqueName: \"kubernetes.io/projected/0c47e2a0-d831-46ee-a13a-93b4c487c4d9-kube-api-access-sc7kf\") pod \"ovsdbserver-sb-0\" (UID: \"0c47e2a0-d831-46ee-a13a-93b4c487c4d9\") " pod="openstack/ovsdbserver-sb-0" Feb 16 11:28:23 crc kubenswrapper[4949]: I0216 11:28:23.000057 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/0c47e2a0-d831-46ee-a13a-93b4c487c4d9-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"0c47e2a0-d831-46ee-a13a-93b4c487c4d9\") " pod="openstack/ovsdbserver-sb-0" Feb 16 11:28:23 crc kubenswrapper[4949]: I0216 11:28:23.003111 4949 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0c47e2a0-d831-46ee-a13a-93b4c487c4d9-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"0c47e2a0-d831-46ee-a13a-93b4c487c4d9\") " pod="openstack/ovsdbserver-sb-0" Feb 16 11:28:23 crc kubenswrapper[4949]: I0216 11:28:23.005786 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c47e2a0-d831-46ee-a13a-93b4c487c4d9-config\") pod \"ovsdbserver-sb-0\" (UID: \"0c47e2a0-d831-46ee-a13a-93b4c487c4d9\") " pod="openstack/ovsdbserver-sb-0" Feb 16 11:28:23 crc kubenswrapper[4949]: I0216 11:28:23.008033 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/0c47e2a0-d831-46ee-a13a-93b4c487c4d9-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"0c47e2a0-d831-46ee-a13a-93b4c487c4d9\") " pod="openstack/ovsdbserver-sb-0" Feb 16 11:28:23 crc kubenswrapper[4949]: I0216 11:28:23.014874 4949 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Feb 16 11:28:23 crc kubenswrapper[4949]: I0216 11:28:23.014923 4949 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-6a014519-a0b1-4533-90e8-0e5f8792071e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6a014519-a0b1-4533-90e8-0e5f8792071e\") pod \"ovsdbserver-sb-0\" (UID: \"0c47e2a0-d831-46ee-a13a-93b4c487c4d9\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/f7937d15fd6f824a211b429a592f652e4d0e0edc18a821bfba4cc75d5be2b82f/globalmount\"" pod="openstack/ovsdbserver-sb-0" Feb 16 11:28:23 crc kubenswrapper[4949]: I0216 11:28:23.017804 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c47e2a0-d831-46ee-a13a-93b4c487c4d9-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"0c47e2a0-d831-46ee-a13a-93b4c487c4d9\") " pod="openstack/ovsdbserver-sb-0" Feb 16 11:28:23 crc kubenswrapper[4949]: I0216 11:28:23.023727 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sc7kf\" (UniqueName: \"kubernetes.io/projected/0c47e2a0-d831-46ee-a13a-93b4c487c4d9-kube-api-access-sc7kf\") pod \"ovsdbserver-sb-0\" (UID: \"0c47e2a0-d831-46ee-a13a-93b4c487c4d9\") " pod="openstack/ovsdbserver-sb-0" Feb 16 11:28:23 crc kubenswrapper[4949]: I0216 11:28:23.025670 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/0c47e2a0-d831-46ee-a13a-93b4c487c4d9-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"0c47e2a0-d831-46ee-a13a-93b4c487c4d9\") " pod="openstack/ovsdbserver-sb-0" Feb 16 11:28:23 crc kubenswrapper[4949]: I0216 11:28:23.053798 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/0c47e2a0-d831-46ee-a13a-93b4c487c4d9-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"0c47e2a0-d831-46ee-a13a-93b4c487c4d9\") " pod="openstack/ovsdbserver-sb-0" Feb 16 11:28:23 crc kubenswrapper[4949]: I0216 11:28:23.089251 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-6a014519-a0b1-4533-90e8-0e5f8792071e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6a014519-a0b1-4533-90e8-0e5f8792071e\") pod \"ovsdbserver-sb-0\" (UID: 
\"0c47e2a0-d831-46ee-a13a-93b4c487c4d9\") " pod="openstack/ovsdbserver-sb-0" Feb 16 11:28:23 crc kubenswrapper[4949]: I0216 11:28:23.399288 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Feb 16 11:28:27 crc kubenswrapper[4949]: I0216 11:28:27.782277 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-6494cb78b9-hnbth" Feb 16 11:28:27 crc kubenswrapper[4949]: I0216 11:28:27.783086 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-6494cb78b9-hnbth" Feb 16 11:28:27 crc kubenswrapper[4949]: I0216 11:28:27.789012 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-6494cb78b9-hnbth" Feb 16 11:28:28 crc kubenswrapper[4949]: I0216 11:28:28.522197 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-6494cb78b9-hnbth" Feb 16 11:28:28 crc kubenswrapper[4949]: I0216 11:28:28.606743 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-757d775c7-shl7t"] Feb 16 11:28:29 crc kubenswrapper[4949]: W0216 11:28:29.756774 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9dc5fe0b_3ce0_4647_8c29_f17c359e9f4c.slice/crio-bce362d55f98e1c9d2da113803a46c842254e31388db6f1f1992fc4f01cfd4e7 WatchSource:0}: Error finding container bce362d55f98e1c9d2da113803a46c842254e31388db6f1f1992fc4f01cfd4e7: Status 404 returned error can't find the container with id bce362d55f98e1c9d2da113803a46c842254e31388db6f1f1992fc4f01cfd4e7 Feb 16 11:28:29 crc kubenswrapper[4949]: W0216 11:28:29.760646 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd9ba865a_34ea_4b4b_80a4_0d35a63dd064.slice/crio-3fb05753f8da314ed77faa872abf58c843d30a5f91cf60d8b3f41650440555a5 WatchSource:0}: Error finding container 3fb05753f8da314ed77faa872abf58c843d30a5f91cf60d8b3f41650440555a5: Status 404 returned error can't find the container with id 3fb05753f8da314ed77faa872abf58c843d30a5f91cf60d8b3f41650440555a5 Feb 16 11:28:30 crc kubenswrapper[4949]: I0216 11:28:30.556707 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"d9ba865a-34ea-4b4b-80a4-0d35a63dd064","Type":"ContainerStarted","Data":"3fb05753f8da314ed77faa872abf58c843d30a5f91cf60d8b3f41650440555a5"} Feb 16 11:28:30 crc kubenswrapper[4949]: I0216 11:28:30.558357 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-fbgr9" event={"ID":"9dc5fe0b-3ce0-4647-8c29-f17c359e9f4c","Type":"ContainerStarted","Data":"bce362d55f98e1c9d2da113803a46c842254e31388db6f1f1992fc4f01cfd4e7"} Feb 16 11:28:40 crc kubenswrapper[4949]: E0216 11:28:40.145123 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb:current-podified" Feb 16 11:28:40 crc kubenswrapper[4949]: E0216 11:28:40.147512 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[bash 
/var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hxmw9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-cell1-galera-0_openstack(9a5b2f17-57bf-4aad-b18c-d1ec47f358c9): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 16 11:28:40 crc kubenswrapper[4949]: E0216 11:28:40.149353 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-cell1-galera-0" podUID="9a5b2f17-57bf-4aad-b18c-d1ec47f358c9" Feb 16 11:28:40 crc kubenswrapper[4949]: E0216 11:28:40.677954 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\\\"\"" pod="openstack/openstack-cell1-galera-0" podUID="9a5b2f17-57bf-4aad-b18c-d1ec47f358c9" Feb 16 11:28:41 crc kubenswrapper[4949]: E0216 11:28:41.204842 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Feb 16 11:28:41 crc kubenswrapper[4949]: E0216 11:28:41.205881 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins 
/operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-pxqff,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-server-0_openstack(2b4e8478-eec0-499f-a824-b0f07355e4f6): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 16 11:28:41 crc kubenswrapper[4949]: E0216 11:28:41.207094 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-server-0" podUID="2b4e8478-eec0-499f-a824-b0f07355e4f6" Feb 16 11:28:41 crc kubenswrapper[4949]: E0216 11:28:41.211152 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Feb 16 11:28:41 crc kubenswrapper[4949]: E0216 11:28:41.211364 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins 
/operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-l9xqh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-server-2_openstack(4579a2eb-f9a3-4d32-b67a-d76de7f6a97a): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 16 11:28:41 crc kubenswrapper[4949]: E0216 11:28:41.212579 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-server-2" podUID="4579a2eb-f9a3-4d32-b67a-d76de7f6a97a" Feb 16 11:28:41 crc kubenswrapper[4949]: E0216 11:28:41.223764 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Feb 16 11:28:41 crc kubenswrapper[4949]: E0216 11:28:41.223947 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins 
/operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-2wzds,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-server-1_openstack(2fd90353-44d0-4269-84cc-f90c10eb6da4): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 16 11:28:41 crc kubenswrapper[4949]: E0216 11:28:41.225191 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-server-1" podUID="2fd90353-44d0-4269-84cc-f90c10eb6da4" Feb 16 11:28:41 crc kubenswrapper[4949]: E0216 11:28:41.261887 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Feb 16 11:28:41 crc kubenswrapper[4949]: E0216 11:28:41.262134 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins 
/operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-k6l5t,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cell1-server-0_openstack(0f715146-edc4-4f1f-80e3-f134d9833f47): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 16 11:28:41 crc kubenswrapper[4949]: E0216 11:28:41.263436 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-cell1-server-0" podUID="0f715146-edc4-4f1f-80e3-f134d9833f47" Feb 16 11:28:41 crc kubenswrapper[4949]: E0216 11:28:41.686840 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-server-1" podUID="2fd90353-44d0-4269-84cc-f90c10eb6da4" Feb 16 11:28:41 crc kubenswrapper[4949]: E0216 11:28:41.687070 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" 
pod="openstack/rabbitmq-server-2" podUID="4579a2eb-f9a3-4d32-b67a-d76de7f6a97a" Feb 16 11:28:41 crc kubenswrapper[4949]: E0216 11:28:41.687231 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-server-0" podUID="2b4e8478-eec0-499f-a824-b0f07355e4f6" Feb 16 11:28:41 crc kubenswrapper[4949]: E0216 11:28:41.687283 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-cell1-server-0" podUID="0f715146-edc4-4f1f-80e3-f134d9833f47" Feb 16 11:28:41 crc kubenswrapper[4949]: E0216 11:28:41.968008 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-memcached:current-podified" Feb 16 11:28:41 crc kubenswrapper[4949]: E0216 11:28:41.968374 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:memcached,Image:quay.io/podified-antelope-centos9/openstack-memcached:current-podified,Command:[/usr/bin/dumb-init -- /usr/local/bin/kolla_start],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:memcached,HostPort:0,ContainerPort:11211,Protocol:TCP,HostIP:,},ContainerPort{Name:memcached-tls,HostPort:0,ContainerPort:11212,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:POD_IPS,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIPs,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:CONFIG_HASH,Value:n5b9h5f8h5bch65fh6hbfh65ch99hdch545h84h79hdh697h644h667h56fh585h97h5c7h644hd7h58hfdh688h9h5c8hdh67fh686h694h644q,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/src,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/certs/memcached.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/private/memcached.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-4bz6t,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 
},Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 },Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42457,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42457,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod memcached-0_openstack(03acf817-d9f3-4c65-b4c0-920136bc3d7b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 16 11:28:41 crc kubenswrapper[4949]: E0216 11:28:41.969660 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"memcached\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/memcached-0" podUID="03acf817-d9f3-4c65-b4c0-920136bc3d7b" Feb 16 11:28:42 crc kubenswrapper[4949]: E0216 11:28:42.063907 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb:current-podified" Feb 16 11:28:42 crc kubenswrapper[4949]: E0216 11:28:42.064149 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[bash 
/var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-psrjk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-galera-0_openstack(f8511a9d-0c08-43c9-9243-f340d75fabe1): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 16 11:28:42 crc kubenswrapper[4949]: E0216 11:28:42.065418 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-galera-0" podUID="f8511a9d-0c08-43c9-9243-f340d75fabe1" Feb 16 11:28:42 crc kubenswrapper[4949]: E0216 11:28:42.699779 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\\\"\"" pod="openstack/openstack-galera-0" podUID="f8511a9d-0c08-43c9-9243-f340d75fabe1" Feb 16 11:28:42 crc kubenswrapper[4949]: E0216 11:28:42.699801 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"memcached\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-memcached:current-podified\\\"\"" pod="openstack/memcached-0" podUID="03acf817-d9f3-4c65-b4c0-920136bc3d7b" Feb 16 11:28:43 crc kubenswrapper[4949]: E0216 11:28:43.320758 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Feb 16 11:28:43 crc kubenswrapper[4949]: E0216 11:28:43.320976 
4949 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n659h4h664hbh658h587h67ch89h587h8fh679hc6hf9h55fh644h5d5h698h68dh5cdh5ffh669h54ch9h689hb8hd4h5bfhd8h5d7h5fh665h574q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-q7s92,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-57d769cc4f-wkwb6_openstack(1241685d-968e-4259-8d7e-f27c55e1fb99): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 16 11:28:43 crc kubenswrapper[4949]: E0216 11:28:43.324230 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-57d769cc4f-wkwb6" podUID="1241685d-968e-4259-8d7e-f27c55e1fb99" Feb 16 11:28:43 crc kubenswrapper[4949]: E0216 11:28:43.431380 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Feb 16 11:28:43 crc kubenswrapper[4949]: E0216 11:28:43.431642 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-z2gj9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-666b6646f7-qvg6k_openstack(2877518f-206f-4886-88c6-22754e8c2556): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 16 11:28:43 crc kubenswrapper[4949]: E0216 11:28:43.433133 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-666b6646f7-qvg6k" podUID="2877518f-206f-4886-88c6-22754e8c2556" Feb 16 11:28:43 crc kubenswrapper[4949]: E0216 11:28:43.718259 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-57d769cc4f-wkwb6" podUID="1241685d-968e-4259-8d7e-f27c55e1fb99" Feb 16 11:28:43 crc kubenswrapper[4949]: E0216 11:28:43.809832 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-prometheus-config-reloader-rhel9@sha256:9a2097bc5b2e02bc1703f64c452ce8fe4bc6775b732db930ff4770b76ae4653a" Feb 16 11:28:43 crc kubenswrapper[4949]: E0216 11:28:43.810578 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init-config-reloader,Image:registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-prometheus-config-reloader-rhel9@sha256:9a2097bc5b2e02bc1703f64c452ce8fe4bc6775b732db930ff4770b76ae4653a,Command:[/bin/prometheus-config-reloader],Args:[--watch-interval=0 --listen-address=:8081 
--config-file=/etc/prometheus/config/prometheus.yaml.gz --config-envsubst-file=/etc/prometheus/config_out/prometheus.env.yaml --watched-dir=/etc/prometheus/rules/prometheus-metric-storage-rulefiles-0 --watched-dir=/etc/prometheus/rules/prometheus-metric-storage-rulefiles-1 --watched-dir=/etc/prometheus/rules/prometheus-metric-storage-rulefiles-2],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:reloader-init,HostPort:0,ContainerPort:8081,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:POD_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.name,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:SHARD,Value:0,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:false,MountPath:/etc/prometheus/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-out,ReadOnly:false,MountPath:/etc/prometheus/config_out,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:prometheus-metric-storage-rulefiles-0,ReadOnly:false,MountPath:/etc/prometheus/rules/prometheus-metric-storage-rulefiles-0,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:prometheus-metric-storage-rulefiles-1,ReadOnly:false,MountPath:/etc/prometheus/rules/prometheus-metric-storage-rulefiles-1,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:prometheus-metric-storage-rulefiles-2,ReadOnly:false,MountPath:/etc/prometheus/rules/prometheus-metric-storage-rulefiles-2,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-x9vhk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod prometheus-metric-storage-0_openstack(9dd62db2-4af9-482c-b9ad-34021e59dae8): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Feb 16 11:28:43 crc kubenswrapper[4949]: E0216 11:28:43.813438 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init-config-reloader\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openstack/prometheus-metric-storage-0" podUID="9dd62db2-4af9-482c-b9ad-34021e59dae8" Feb 16 11:28:43 crc kubenswrapper[4949]: E0216 11:28:43.882896 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" 
image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Feb 16 11:28:43 crc kubenswrapper[4949]: E0216 11:28:43.883079 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-s4npw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-fsvfh_openstack(138cdd70-b3af-45cd-88a3-c80fcd10e094): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 16 11:28:43 crc kubenswrapper[4949]: E0216 11:28:43.884957 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-fsvfh" podUID="138cdd70-b3af-45cd-88a3-c80fcd10e094" Feb 16 11:28:43 crc kubenswrapper[4949]: E0216 11:28:43.892068 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Feb 16 11:28:43 crc kubenswrapper[4949]: E0216 11:28:43.892274 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7nz6l,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-2j9gn_openstack(4d7f15c5-4079-4ffd-8f92-41c2c2807a7c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 16 11:28:43 crc kubenswrapper[4949]: E0216 11:28:43.893677 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-2j9gn" podUID="4d7f15c5-4079-4ffd-8f92-41c2c2807a7c" Feb 16 11:28:45 crc kubenswrapper[4949]: E0216 11:28:44.721788 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init-config-reloader\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-prometheus-config-reloader-rhel9@sha256:9a2097bc5b2e02bc1703f64c452ce8fe4bc6775b732db930ff4770b76ae4653a\\\"\"" pod="openstack/prometheus-metric-storage-0" podUID="9dd62db2-4af9-482c-b9ad-34021e59dae8" Feb 16 11:28:45 crc kubenswrapper[4949]: I0216 11:28:45.505360 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-qvg6k" Feb 16 11:28:45 crc kubenswrapper[4949]: I0216 11:28:45.565858 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2877518f-206f-4886-88c6-22754e8c2556-config\") pod \"2877518f-206f-4886-88c6-22754e8c2556\" (UID: \"2877518f-206f-4886-88c6-22754e8c2556\") " Feb 16 11:28:45 crc kubenswrapper[4949]: I0216 11:28:45.565955 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2877518f-206f-4886-88c6-22754e8c2556-dns-svc\") pod \"2877518f-206f-4886-88c6-22754e8c2556\" (UID: \"2877518f-206f-4886-88c6-22754e8c2556\") " Feb 16 11:28:45 crc kubenswrapper[4949]: I0216 11:28:45.566128 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z2gj9\" (UniqueName: \"kubernetes.io/projected/2877518f-206f-4886-88c6-22754e8c2556-kube-api-access-z2gj9\") pod \"2877518f-206f-4886-88c6-22754e8c2556\" (UID: \"2877518f-206f-4886-88c6-22754e8c2556\") " Feb 16 11:28:45 crc kubenswrapper[4949]: I0216 11:28:45.572735 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2877518f-206f-4886-88c6-22754e8c2556-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2877518f-206f-4886-88c6-22754e8c2556" (UID: "2877518f-206f-4886-88c6-22754e8c2556"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:28:45 crc kubenswrapper[4949]: I0216 11:28:45.573035 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2877518f-206f-4886-88c6-22754e8c2556-config" (OuterVolumeSpecName: "config") pod "2877518f-206f-4886-88c6-22754e8c2556" (UID: "2877518f-206f-4886-88c6-22754e8c2556"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:28:45 crc kubenswrapper[4949]: I0216 11:28:45.573422 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2877518f-206f-4886-88c6-22754e8c2556-kube-api-access-z2gj9" (OuterVolumeSpecName: "kube-api-access-z2gj9") pod "2877518f-206f-4886-88c6-22754e8c2556" (UID: "2877518f-206f-4886-88c6-22754e8c2556"). InnerVolumeSpecName "kube-api-access-z2gj9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:28:45 crc kubenswrapper[4949]: I0216 11:28:45.668375 4949 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2877518f-206f-4886-88c6-22754e8c2556-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:28:45 crc kubenswrapper[4949]: I0216 11:28:45.668418 4949 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2877518f-206f-4886-88c6-22754e8c2556-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 16 11:28:45 crc kubenswrapper[4949]: I0216 11:28:45.668433 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z2gj9\" (UniqueName: \"kubernetes.io/projected/2877518f-206f-4886-88c6-22754e8c2556-kube-api-access-z2gj9\") on node \"crc\" DevicePath \"\"" Feb 16 11:28:45 crc kubenswrapper[4949]: I0216 11:28:45.738939 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-qvg6k" event={"ID":"2877518f-206f-4886-88c6-22754e8c2556","Type":"ContainerDied","Data":"d3bd64e52e2735a7ecfe858405b944f79cd9294120f16d5ccb9ec74e60c6fcea"} Feb 16 11:28:45 crc kubenswrapper[4949]: I0216 11:28:45.738996 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-qvg6k" Feb 16 11:28:45 crc kubenswrapper[4949]: I0216 11:28:45.829211 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-qvg6k"] Feb 16 11:28:45 crc kubenswrapper[4949]: I0216 11:28:45.843355 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-qvg6k"] Feb 16 11:28:46 crc kubenswrapper[4949]: I0216 11:28:46.090078 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Feb 16 11:28:46 crc kubenswrapper[4949]: I0216 11:28:46.359229 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-w9phd"] Feb 16 11:28:46 crc kubenswrapper[4949]: I0216 11:28:46.679449 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-2j9gn" Feb 16 11:28:46 crc kubenswrapper[4949]: I0216 11:28:46.802026 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7nz6l\" (UniqueName: \"kubernetes.io/projected/4d7f15c5-4079-4ffd-8f92-41c2c2807a7c-kube-api-access-7nz6l\") pod \"4d7f15c5-4079-4ffd-8f92-41c2c2807a7c\" (UID: \"4d7f15c5-4079-4ffd-8f92-41c2c2807a7c\") " Feb 16 11:28:46 crc kubenswrapper[4949]: I0216 11:28:46.802855 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d7f15c5-4079-4ffd-8f92-41c2c2807a7c-config\") pod \"4d7f15c5-4079-4ffd-8f92-41c2c2807a7c\" (UID: \"4d7f15c5-4079-4ffd-8f92-41c2c2807a7c\") " Feb 16 11:28:46 crc kubenswrapper[4949]: I0216 11:28:46.803125 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4d7f15c5-4079-4ffd-8f92-41c2c2807a7c-dns-svc\") pod \"4d7f15c5-4079-4ffd-8f92-41c2c2807a7c\" (UID: \"4d7f15c5-4079-4ffd-8f92-41c2c2807a7c\") " Feb 16 11:28:46 crc kubenswrapper[4949]: I0216 11:28:46.809471 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4d7f15c5-4079-4ffd-8f92-41c2c2807a7c-config" (OuterVolumeSpecName: "config") pod "4d7f15c5-4079-4ffd-8f92-41c2c2807a7c" (UID: "4d7f15c5-4079-4ffd-8f92-41c2c2807a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:28:46 crc kubenswrapper[4949]: I0216 11:28:46.814854 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4d7f15c5-4079-4ffd-8f92-41c2c2807a7c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "4d7f15c5-4079-4ffd-8f92-41c2c2807a7c" (UID: "4d7f15c5-4079-4ffd-8f92-41c2c2807a7c"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:28:46 crc kubenswrapper[4949]: I0216 11:28:46.830491 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-2j9gn" event={"ID":"4d7f15c5-4079-4ffd-8f92-41c2c2807a7c","Type":"ContainerDied","Data":"e2732a139ab145772c3e38e0040fbb5ab3c6d080c53d7e0fa2e794535fc3607e"} Feb 16 11:28:46 crc kubenswrapper[4949]: I0216 11:28:46.830987 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-2j9gn" Feb 16 11:28:46 crc kubenswrapper[4949]: I0216 11:28:46.881104 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4d7f15c5-4079-4ffd-8f92-41c2c2807a7c-kube-api-access-7nz6l" (OuterVolumeSpecName: "kube-api-access-7nz6l") pod "4d7f15c5-4079-4ffd-8f92-41c2c2807a7c" (UID: "4d7f15c5-4079-4ffd-8f92-41c2c2807a7c"). InnerVolumeSpecName "kube-api-access-7nz6l". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:28:46 crc kubenswrapper[4949]: I0216 11:28:46.929409 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-fsvfh" Feb 16 11:28:46 crc kubenswrapper[4949]: I0216 11:28:46.937630 4949 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d7f15c5-4079-4ffd-8f92-41c2c2807a7c-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:28:46 crc kubenswrapper[4949]: I0216 11:28:46.937683 4949 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4d7f15c5-4079-4ffd-8f92-41c2c2807a7c-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 16 11:28:46 crc kubenswrapper[4949]: I0216 11:28:46.937695 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7nz6l\" (UniqueName: \"kubernetes.io/projected/4d7f15c5-4079-4ffd-8f92-41c2c2807a7c-kube-api-access-7nz6l\") on node \"crc\" DevicePath \"\"" Feb 16 11:28:47 crc kubenswrapper[4949]: I0216 11:28:47.039220 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/138cdd70-b3af-45cd-88a3-c80fcd10e094-config\") pod \"138cdd70-b3af-45cd-88a3-c80fcd10e094\" (UID: \"138cdd70-b3af-45cd-88a3-c80fcd10e094\") " Feb 16 11:28:47 crc kubenswrapper[4949]: I0216 11:28:47.039316 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4npw\" (UniqueName: \"kubernetes.io/projected/138cdd70-b3af-45cd-88a3-c80fcd10e094-kube-api-access-s4npw\") pod \"138cdd70-b3af-45cd-88a3-c80fcd10e094\" (UID: \"138cdd70-b3af-45cd-88a3-c80fcd10e094\") " Feb 16 11:28:47 crc kubenswrapper[4949]: I0216 11:28:47.040611 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/138cdd70-b3af-45cd-88a3-c80fcd10e094-config" (OuterVolumeSpecName: "config") pod "138cdd70-b3af-45cd-88a3-c80fcd10e094" (UID: "138cdd70-b3af-45cd-88a3-c80fcd10e094"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:28:47 crc kubenswrapper[4949]: I0216 11:28:47.044703 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/138cdd70-b3af-45cd-88a3-c80fcd10e094-kube-api-access-s4npw" (OuterVolumeSpecName: "kube-api-access-s4npw") pod "138cdd70-b3af-45cd-88a3-c80fcd10e094" (UID: "138cdd70-b3af-45cd-88a3-c80fcd10e094"). InnerVolumeSpecName "kube-api-access-s4npw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:28:47 crc kubenswrapper[4949]: I0216 11:28:47.125186 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-cswq4"] Feb 16 11:28:47 crc kubenswrapper[4949]: I0216 11:28:47.142011 4949 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/138cdd70-b3af-45cd-88a3-c80fcd10e094-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:28:47 crc kubenswrapper[4949]: I0216 11:28:47.142059 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4npw\" (UniqueName: \"kubernetes.io/projected/138cdd70-b3af-45cd-88a3-c80fcd10e094-kube-api-access-s4npw\") on node \"crc\" DevicePath \"\"" Feb 16 11:28:47 crc kubenswrapper[4949]: I0216 11:28:47.202382 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-2j9gn"] Feb 16 11:28:47 crc kubenswrapper[4949]: I0216 11:28:47.211669 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-2j9gn"] Feb 16 11:28:47 crc kubenswrapper[4949]: W0216 11:28:47.237130 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod99904114_3c39_45d5_84b6_35b9543bdf3a.slice/crio-b274d0a16edc60040434262ae69a1f7fcc438257ad63dc678ef17ca05320656e WatchSource:0}: Error finding container b274d0a16edc60040434262ae69a1f7fcc438257ad63dc678ef17ca05320656e: Status 404 returned error can't find the container with id b274d0a16edc60040434262ae69a1f7fcc438257ad63dc678ef17ca05320656e Feb 16 11:28:47 crc kubenswrapper[4949]: I0216 11:28:47.262059 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2877518f-206f-4886-88c6-22754e8c2556" path="/var/lib/kubelet/pods/2877518f-206f-4886-88c6-22754e8c2556/volumes" Feb 16 11:28:47 crc kubenswrapper[4949]: I0216 11:28:47.262956 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4d7f15c5-4079-4ffd-8f92-41c2c2807a7c" path="/var/lib/kubelet/pods/4d7f15c5-4079-4ffd-8f92-41c2c2807a7c/volumes" Feb 16 11:28:47 crc kubenswrapper[4949]: I0216 11:28:47.871455 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"0c47e2a0-d831-46ee-a13a-93b4c487c4d9","Type":"ContainerStarted","Data":"22ed8e4ab28550f2e51f14475d95f2174a3a5f080ffa661fffc5ce8b69c90601"} Feb 16 11:28:47 crc kubenswrapper[4949]: I0216 11:28:47.883340 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-fsvfh" event={"ID":"138cdd70-b3af-45cd-88a3-c80fcd10e094","Type":"ContainerDied","Data":"076d48114a77751b984e9703f8547c6a8c334cae7317399283a326d2b8c4f576"} Feb 16 11:28:47 crc kubenswrapper[4949]: I0216 11:28:47.883438 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-fsvfh" Feb 16 11:28:47 crc kubenswrapper[4949]: I0216 11:28:47.896950 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-cswq4" event={"ID":"99904114-3c39-45d5-84b6-35b9543bdf3a","Type":"ContainerStarted","Data":"b274d0a16edc60040434262ae69a1f7fcc438257ad63dc678ef17ca05320656e"} Feb 16 11:28:47 crc kubenswrapper[4949]: I0216 11:28:47.914920 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-w9phd" event={"ID":"d4a2be70-f0e5-4f39-8b5e-744b868a47ad","Type":"ContainerStarted","Data":"0cd2aae14337491d73d6386794a2cad25c3db227d7301e3f341d8be48279d120"} Feb 16 11:28:47 crc kubenswrapper[4949]: I0216 11:28:47.980270 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-fsvfh"] Feb 16 11:28:48 crc kubenswrapper[4949]: I0216 11:28:48.010694 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-fsvfh"] Feb 16 11:28:48 crc kubenswrapper[4949]: E0216 11:28:48.554968 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0" Feb 16 11:28:48 crc kubenswrapper[4949]: E0216 11:28:48.555022 4949 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0" Feb 16 11:28:48 crc kubenswrapper[4949]: E0216 11:28:48.555158 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-state-metrics,Image:registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0,Command:[],Args:[--resources=pods --namespaces=openstack],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http-metrics,HostPort:0,ContainerPort:8080,Protocol:TCP,HostIP:,},ContainerPort{Name:telemetry,HostPort:0,ContainerPort:8081,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-tm986,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/livez,Port:{0 8080 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod kube-state-metrics-0_openstack(f89f3c3c-f6cb-4d3e-8950-fe35b2bfcc41): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Feb 16 11:28:48 crc kubenswrapper[4949]: E0216 11:28:48.556409 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openstack/kube-state-metrics-0" podUID="f89f3c3c-f6cb-4d3e-8950-fe35b2bfcc41" Feb 16 11:28:48 crc kubenswrapper[4949]: E0216 11:28:48.927150 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0\\\"\"" pod="openstack/kube-state-metrics-0" podUID="f89f3c3c-f6cb-4d3e-8950-fe35b2bfcc41" Feb 16 11:28:49 crc kubenswrapper[4949]: I0216 11:28:49.321287 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="138cdd70-b3af-45cd-88a3-c80fcd10e094" path="/var/lib/kubelet/pods/138cdd70-b3af-45cd-88a3-c80fcd10e094/volumes" Feb 16 11:28:49 crc kubenswrapper[4949]: I0216 11:28:49.938439 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-ui-dashboards-66cbf594b5-qzxn6" event={"ID":"f99782ba-1386-4f77-ba13-bb2fd7ab6935","Type":"ContainerStarted","Data":"e75034b051116f19b9e600583b9104c21602e3282775f0b6f7ff33e6bb273e5d"} Feb 16 11:28:49 crc kubenswrapper[4949]: I0216 11:28:49.957485 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-ui-dashboards-66cbf594b5-qzxn6" podStartSLOduration=6.9581309749999996 podStartE2EDuration="33.957462432s" podCreationTimestamp="2026-02-16 11:28:16 +0000 UTC" firstStartedPulling="2026-02-16 11:28:19.833018284 +0000 UTC m=+1289.462352449" lastFinishedPulling="2026-02-16 11:28:46.832349741 +0000 UTC m=+1316.461683906" observedRunningTime="2026-02-16 11:28:49.954976331 +0000 UTC m=+1319.584310496" watchObservedRunningTime="2026-02-16 11:28:49.957462432 +0000 UTC m=+1319.586796597" Feb 16 11:28:50 crc kubenswrapper[4949]: I0216 11:28:50.953311 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-fbgr9" event={"ID":"9dc5fe0b-3ce0-4647-8c29-f17c359e9f4c","Type":"ContainerStarted","Data":"376a400578afbf5313c2b5a39ea01958469a730d06bafc19070f51f4db3340df"} Feb 16 11:28:50 crc kubenswrapper[4949]: I0216 11:28:50.955266 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ovn-controller-clw7g" event={"ID":"dea77106-f4b1-4515-80bb-ebad1a6effcf","Type":"ContainerStarted","Data":"20a0462b4369a1e8421b7a7e35c0c77203c2e0f5bfa186ffb1034c5ab5810489"} Feb 16 11:28:50 crc kubenswrapper[4949]: I0216 11:28:50.955557 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-clw7g" Feb 16 11:28:50 crc kubenswrapper[4949]: I0216 11:28:50.960191 4949 generic.go:334] "Generic (PLEG): container finished" podID="d4a2be70-f0e5-4f39-8b5e-744b868a47ad" containerID="911cded894ec173a8503d795051bfda7229347e75364329d47657b4ecf323c84" exitCode=0 Feb 16 11:28:50 crc kubenswrapper[4949]: I0216 11:28:50.960401 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-w9phd" event={"ID":"d4a2be70-f0e5-4f39-8b5e-744b868a47ad","Type":"ContainerDied","Data":"911cded894ec173a8503d795051bfda7229347e75364329d47657b4ecf323c84"} Feb 16 11:28:50 crc kubenswrapper[4949]: I0216 11:28:50.963391 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"0c47e2a0-d831-46ee-a13a-93b4c487c4d9","Type":"ContainerStarted","Data":"84c2c0386706878ce60d9ec7f67d712c33ecf09c850339680cfeb30c49fb0a27"} Feb 16 11:28:50 crc kubenswrapper[4949]: I0216 11:28:50.967849 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"d9ba865a-34ea-4b4b-80a4-0d35a63dd064","Type":"ContainerStarted","Data":"c11028796631a74615ce9899d1a804ae486ab0eb6eddd60d98d1373994f52d00"} Feb 16 11:28:51 crc kubenswrapper[4949]: I0216 11:28:51.024466 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-clw7g" podStartSLOduration=6.385806645 podStartE2EDuration="33.024447512s" podCreationTimestamp="2026-02-16 11:28:18 +0000 UTC" firstStartedPulling="2026-02-16 11:28:20.738744448 +0000 UTC m=+1290.368078613" lastFinishedPulling="2026-02-16 11:28:47.377385315 +0000 UTC m=+1317.006719480" observedRunningTime="2026-02-16 11:28:51.022317902 +0000 UTC m=+1320.651652077" watchObservedRunningTime="2026-02-16 11:28:51.024447512 +0000 UTC m=+1320.653781677" Feb 16 11:28:51 crc kubenswrapper[4949]: I0216 11:28:51.980424 4949 generic.go:334] "Generic (PLEG): container finished" podID="9dc5fe0b-3ce0-4647-8c29-f17c359e9f4c" containerID="376a400578afbf5313c2b5a39ea01958469a730d06bafc19070f51f4db3340df" exitCode=0 Feb 16 11:28:51 crc kubenswrapper[4949]: I0216 11:28:51.980544 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-fbgr9" event={"ID":"9dc5fe0b-3ce0-4647-8c29-f17c359e9f4c","Type":"ContainerDied","Data":"376a400578afbf5313c2b5a39ea01958469a730d06bafc19070f51f4db3340df"} Feb 16 11:28:52 crc kubenswrapper[4949]: I0216 11:28:52.994744 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"d9ba865a-34ea-4b4b-80a4-0d35a63dd064","Type":"ContainerStarted","Data":"3c8c39ca01a3e867f1a0cb4cc1588c031f6673a9ac19c8278fcff78b6cd81d98"} Feb 16 11:28:52 crc kubenswrapper[4949]: I0216 11:28:52.999820 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-fbgr9" event={"ID":"9dc5fe0b-3ce0-4647-8c29-f17c359e9f4c","Type":"ContainerStarted","Data":"01f2a0ee58af509e1847ea8b1c4b0e38b1acaa525c9caafb25dcc6eb16551751"} Feb 16 11:28:52 crc kubenswrapper[4949]: I0216 11:28:52.999878 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-fbgr9" Feb 16 11:28:52 crc kubenswrapper[4949]: I0216 
11:28:52.999890 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-fbgr9" event={"ID":"9dc5fe0b-3ce0-4647-8c29-f17c359e9f4c","Type":"ContainerStarted","Data":"bd4660bff732a7792092d1775de797be0a39ba79a2ab7cd51fdf3a9833779137"} Feb 16 11:28:52 crc kubenswrapper[4949]: I0216 11:28:52.999937 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-fbgr9" Feb 16 11:28:53 crc kubenswrapper[4949]: I0216 11:28:53.008210 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-cswq4" event={"ID":"99904114-3c39-45d5-84b6-35b9543bdf3a","Type":"ContainerStarted","Data":"a1b0e812e3553d0eaad3ef744efe652ffce0b0c260237d34523e170a195502e8"} Feb 16 11:28:53 crc kubenswrapper[4949]: I0216 11:28:53.015014 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-w9phd" event={"ID":"d4a2be70-f0e5-4f39-8b5e-744b868a47ad","Type":"ContainerStarted","Data":"2148a9357911a282d7199e1a05531344f0da39d0dc0cfa73bb2586a3b1c21e7e"} Feb 16 11:28:53 crc kubenswrapper[4949]: I0216 11:28:53.015074 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7fd796d7df-w9phd" Feb 16 11:28:53 crc kubenswrapper[4949]: I0216 11:28:53.017368 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"0c47e2a0-d831-46ee-a13a-93b4c487c4d9","Type":"ContainerStarted","Data":"81ccb7e82d0baabf65d86cc4d2c757bd1473a52c2a3a5375f4ebf08961ba6884"} Feb 16 11:28:53 crc kubenswrapper[4949]: I0216 11:28:53.022776 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=13.10890925 podStartE2EDuration="35.022753657s" podCreationTimestamp="2026-02-16 11:28:18 +0000 UTC" firstStartedPulling="2026-02-16 11:28:29.76465632 +0000 UTC m=+1299.393990485" lastFinishedPulling="2026-02-16 11:28:51.678500727 +0000 UTC m=+1321.307834892" observedRunningTime="2026-02-16 11:28:53.01866052 +0000 UTC m=+1322.647994685" watchObservedRunningTime="2026-02-16 11:28:53.022753657 +0000 UTC m=+1322.652087832" Feb 16 11:28:53 crc kubenswrapper[4949]: I0216 11:28:53.039829 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-cswq4" podStartSLOduration=27.61593601 podStartE2EDuration="32.039806573s" podCreationTimestamp="2026-02-16 11:28:21 +0000 UTC" firstStartedPulling="2026-02-16 11:28:47.254073918 +0000 UTC m=+1316.883408083" lastFinishedPulling="2026-02-16 11:28:51.677944481 +0000 UTC m=+1321.307278646" observedRunningTime="2026-02-16 11:28:53.03935532 +0000 UTC m=+1322.668689495" watchObservedRunningTime="2026-02-16 11:28:53.039806573 +0000 UTC m=+1322.669140738" Feb 16 11:28:53 crc kubenswrapper[4949]: I0216 11:28:53.083986 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7fd796d7df-w9phd" podStartSLOduration=28.308328528 podStartE2EDuration="31.083963482s" podCreationTimestamp="2026-02-16 11:28:22 +0000 UTC" firstStartedPulling="2026-02-16 11:28:46.847862273 +0000 UTC m=+1316.477196438" lastFinishedPulling="2026-02-16 11:28:49.623497237 +0000 UTC m=+1319.252831392" observedRunningTime="2026-02-16 11:28:53.070946281 +0000 UTC m=+1322.700280456" watchObservedRunningTime="2026-02-16 11:28:53.083963482 +0000 UTC m=+1322.713297647" Feb 16 11:28:53 crc kubenswrapper[4949]: I0216 11:28:53.115430 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/ovsdbserver-sb-0" podStartSLOduration=27.267107632 podStartE2EDuration="32.115397429s" podCreationTimestamp="2026-02-16 11:28:21 +0000 UTC" firstStartedPulling="2026-02-16 11:28:46.834474891 +0000 UTC m=+1316.463809056" lastFinishedPulling="2026-02-16 11:28:51.682764688 +0000 UTC m=+1321.312098853" observedRunningTime="2026-02-16 11:28:53.092143066 +0000 UTC m=+1322.721477231" watchObservedRunningTime="2026-02-16 11:28:53.115397429 +0000 UTC m=+1322.744731594" Feb 16 11:28:53 crc kubenswrapper[4949]: I0216 11:28:53.135083 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-fbgr9" podStartSLOduration=17.517376484 podStartE2EDuration="35.13505643s" podCreationTimestamp="2026-02-16 11:28:18 +0000 UTC" firstStartedPulling="2026-02-16 11:28:29.759710899 +0000 UTC m=+1299.389045064" lastFinishedPulling="2026-02-16 11:28:47.377390845 +0000 UTC m=+1317.006725010" observedRunningTime="2026-02-16 11:28:53.126377392 +0000 UTC m=+1322.755711567" watchObservedRunningTime="2026-02-16 11:28:53.13505643 +0000 UTC m=+1322.764390605" Feb 16 11:28:53 crc kubenswrapper[4949]: I0216 11:28:53.152157 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Feb 16 11:28:53 crc kubenswrapper[4949]: I0216 11:28:53.201287 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Feb 16 11:28:53 crc kubenswrapper[4949]: I0216 11:28:53.400237 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Feb 16 11:28:53 crc kubenswrapper[4949]: I0216 11:28:53.400290 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Feb 16 11:28:53 crc kubenswrapper[4949]: I0216 11:28:53.430271 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-wkwb6"] Feb 16 11:28:53 crc kubenswrapper[4949]: I0216 11:28:53.485497 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-kslw5"] Feb 16 11:28:53 crc kubenswrapper[4949]: I0216 11:28:53.487856 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-kslw5" Feb 16 11:28:53 crc kubenswrapper[4949]: I0216 11:28:53.490517 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Feb 16 11:28:53 crc kubenswrapper[4949]: I0216 11:28:53.563098 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9cba7e57-5348-4b72-bf94-43da9a6a504d-config\") pod \"dnsmasq-dns-86db49b7ff-kslw5\" (UID: \"9cba7e57-5348-4b72-bf94-43da9a6a504d\") " pod="openstack/dnsmasq-dns-86db49b7ff-kslw5" Feb 16 11:28:53 crc kubenswrapper[4949]: I0216 11:28:53.563231 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9cba7e57-5348-4b72-bf94-43da9a6a504d-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-kslw5\" (UID: \"9cba7e57-5348-4b72-bf94-43da9a6a504d\") " pod="openstack/dnsmasq-dns-86db49b7ff-kslw5" Feb 16 11:28:53 crc kubenswrapper[4949]: I0216 11:28:53.563292 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9cba7e57-5348-4b72-bf94-43da9a6a504d-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-kslw5\" (UID: \"9cba7e57-5348-4b72-bf94-43da9a6a504d\") " pod="openstack/dnsmasq-dns-86db49b7ff-kslw5" Feb 16 11:28:53 crc kubenswrapper[4949]: I0216 11:28:53.563389 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9cba7e57-5348-4b72-bf94-43da9a6a504d-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-kslw5\" (UID: \"9cba7e57-5348-4b72-bf94-43da9a6a504d\") " pod="openstack/dnsmasq-dns-86db49b7ff-kslw5" Feb 16 11:28:53 crc kubenswrapper[4949]: I0216 11:28:53.563564 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l44vw\" (UniqueName: \"kubernetes.io/projected/9cba7e57-5348-4b72-bf94-43da9a6a504d-kube-api-access-l44vw\") pod \"dnsmasq-dns-86db49b7ff-kslw5\" (UID: \"9cba7e57-5348-4b72-bf94-43da9a6a504d\") " pod="openstack/dnsmasq-dns-86db49b7ff-kslw5" Feb 16 11:28:53 crc kubenswrapper[4949]: I0216 11:28:53.602307 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-kslw5"] Feb 16 11:28:53 crc kubenswrapper[4949]: I0216 11:28:53.620308 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Feb 16 11:28:53 crc kubenswrapper[4949]: I0216 11:28:53.667654 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l44vw\" (UniqueName: \"kubernetes.io/projected/9cba7e57-5348-4b72-bf94-43da9a6a504d-kube-api-access-l44vw\") pod \"dnsmasq-dns-86db49b7ff-kslw5\" (UID: \"9cba7e57-5348-4b72-bf94-43da9a6a504d\") " pod="openstack/dnsmasq-dns-86db49b7ff-kslw5" Feb 16 11:28:53 crc kubenswrapper[4949]: I0216 11:28:53.667741 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9cba7e57-5348-4b72-bf94-43da9a6a504d-config\") pod \"dnsmasq-dns-86db49b7ff-kslw5\" (UID: \"9cba7e57-5348-4b72-bf94-43da9a6a504d\") " pod="openstack/dnsmasq-dns-86db49b7ff-kslw5" Feb 16 11:28:53 crc kubenswrapper[4949]: I0216 11:28:53.667819 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/9cba7e57-5348-4b72-bf94-43da9a6a504d-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-kslw5\" (UID: \"9cba7e57-5348-4b72-bf94-43da9a6a504d\") " pod="openstack/dnsmasq-dns-86db49b7ff-kslw5" Feb 16 11:28:53 crc kubenswrapper[4949]: I0216 11:28:53.667871 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9cba7e57-5348-4b72-bf94-43da9a6a504d-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-kslw5\" (UID: \"9cba7e57-5348-4b72-bf94-43da9a6a504d\") " pod="openstack/dnsmasq-dns-86db49b7ff-kslw5" Feb 16 11:28:53 crc kubenswrapper[4949]: I0216 11:28:53.667923 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9cba7e57-5348-4b72-bf94-43da9a6a504d-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-kslw5\" (UID: \"9cba7e57-5348-4b72-bf94-43da9a6a504d\") " pod="openstack/dnsmasq-dns-86db49b7ff-kslw5" Feb 16 11:28:53 crc kubenswrapper[4949]: I0216 11:28:53.668987 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9cba7e57-5348-4b72-bf94-43da9a6a504d-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-kslw5\" (UID: \"9cba7e57-5348-4b72-bf94-43da9a6a504d\") " pod="openstack/dnsmasq-dns-86db49b7ff-kslw5" Feb 16 11:28:53 crc kubenswrapper[4949]: I0216 11:28:53.669459 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9cba7e57-5348-4b72-bf94-43da9a6a504d-config\") pod \"dnsmasq-dns-86db49b7ff-kslw5\" (UID: \"9cba7e57-5348-4b72-bf94-43da9a6a504d\") " pod="openstack/dnsmasq-dns-86db49b7ff-kslw5" Feb 16 11:28:53 crc kubenswrapper[4949]: I0216 11:28:53.669644 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9cba7e57-5348-4b72-bf94-43da9a6a504d-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-kslw5\" (UID: \"9cba7e57-5348-4b72-bf94-43da9a6a504d\") " pod="openstack/dnsmasq-dns-86db49b7ff-kslw5" Feb 16 11:28:53 crc kubenswrapper[4949]: I0216 11:28:53.670312 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9cba7e57-5348-4b72-bf94-43da9a6a504d-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-kslw5\" (UID: \"9cba7e57-5348-4b72-bf94-43da9a6a504d\") " pod="openstack/dnsmasq-dns-86db49b7ff-kslw5" Feb 16 11:28:53 crc kubenswrapper[4949]: I0216 11:28:53.681889 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-757d775c7-shl7t" podUID="c4ac199e-a5f1-4fc7-ad0f-ce907557928d" containerName="console" containerID="cri-o://dc828a6055f2d9d222c312230d65386d41faf576c5f5ecfd9e85c06814e64733" gracePeriod=15 Feb 16 11:28:53 crc kubenswrapper[4949]: I0216 11:28:53.711704 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l44vw\" (UniqueName: \"kubernetes.io/projected/9cba7e57-5348-4b72-bf94-43da9a6a504d-kube-api-access-l44vw\") pod \"dnsmasq-dns-86db49b7ff-kslw5\" (UID: \"9cba7e57-5348-4b72-bf94-43da9a6a504d\") " pod="openstack/dnsmasq-dns-86db49b7ff-kslw5" Feb 16 11:28:53 crc kubenswrapper[4949]: I0216 11:28:53.982225 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-kslw5" Feb 16 11:28:54 crc kubenswrapper[4949]: I0216 11:28:54.045210 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-757d775c7-shl7t_c4ac199e-a5f1-4fc7-ad0f-ce907557928d/console/0.log" Feb 16 11:28:54 crc kubenswrapper[4949]: I0216 11:28:54.045261 4949 generic.go:334] "Generic (PLEG): container finished" podID="c4ac199e-a5f1-4fc7-ad0f-ce907557928d" containerID="dc828a6055f2d9d222c312230d65386d41faf576c5f5ecfd9e85c06814e64733" exitCode=2 Feb 16 11:28:54 crc kubenswrapper[4949]: I0216 11:28:54.046489 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-757d775c7-shl7t" event={"ID":"c4ac199e-a5f1-4fc7-ad0f-ce907557928d","Type":"ContainerDied","Data":"dc828a6055f2d9d222c312230d65386d41faf576c5f5ecfd9e85c06814e64733"} Feb 16 11:28:54 crc kubenswrapper[4949]: I0216 11:28:54.048342 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Feb 16 11:28:54 crc kubenswrapper[4949]: I0216 11:28:54.258457 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-wkwb6" Feb 16 11:28:54 crc kubenswrapper[4949]: I0216 11:28:54.402133 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1241685d-968e-4259-8d7e-f27c55e1fb99-dns-svc\") pod \"1241685d-968e-4259-8d7e-f27c55e1fb99\" (UID: \"1241685d-968e-4259-8d7e-f27c55e1fb99\") " Feb 16 11:28:54 crc kubenswrapper[4949]: I0216 11:28:54.402610 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q7s92\" (UniqueName: \"kubernetes.io/projected/1241685d-968e-4259-8d7e-f27c55e1fb99-kube-api-access-q7s92\") pod \"1241685d-968e-4259-8d7e-f27c55e1fb99\" (UID: \"1241685d-968e-4259-8d7e-f27c55e1fb99\") " Feb 16 11:28:54 crc kubenswrapper[4949]: I0216 11:28:54.402871 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1241685d-968e-4259-8d7e-f27c55e1fb99-config\") pod \"1241685d-968e-4259-8d7e-f27c55e1fb99\" (UID: \"1241685d-968e-4259-8d7e-f27c55e1fb99\") " Feb 16 11:28:54 crc kubenswrapper[4949]: I0216 11:28:54.404726 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1241685d-968e-4259-8d7e-f27c55e1fb99-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "1241685d-968e-4259-8d7e-f27c55e1fb99" (UID: "1241685d-968e-4259-8d7e-f27c55e1fb99"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:28:54 crc kubenswrapper[4949]: I0216 11:28:54.410953 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1241685d-968e-4259-8d7e-f27c55e1fb99-kube-api-access-q7s92" (OuterVolumeSpecName: "kube-api-access-q7s92") pod "1241685d-968e-4259-8d7e-f27c55e1fb99" (UID: "1241685d-968e-4259-8d7e-f27c55e1fb99"). InnerVolumeSpecName "kube-api-access-q7s92". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:28:54 crc kubenswrapper[4949]: I0216 11:28:54.412344 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1241685d-968e-4259-8d7e-f27c55e1fb99-config" (OuterVolumeSpecName: "config") pod "1241685d-968e-4259-8d7e-f27c55e1fb99" (UID: "1241685d-968e-4259-8d7e-f27c55e1fb99"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:28:54 crc kubenswrapper[4949]: I0216 11:28:54.535299 4949 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1241685d-968e-4259-8d7e-f27c55e1fb99-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 16 11:28:54 crc kubenswrapper[4949]: I0216 11:28:54.535352 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q7s92\" (UniqueName: \"kubernetes.io/projected/1241685d-968e-4259-8d7e-f27c55e1fb99-kube-api-access-q7s92\") on node \"crc\" DevicePath \"\"" Feb 16 11:28:54 crc kubenswrapper[4949]: I0216 11:28:54.535366 4949 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1241685d-968e-4259-8d7e-f27c55e1fb99-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:28:54 crc kubenswrapper[4949]: I0216 11:28:54.659206 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-757d775c7-shl7t_c4ac199e-a5f1-4fc7-ad0f-ce907557928d/console/0.log" Feb 16 11:28:54 crc kubenswrapper[4949]: I0216 11:28:54.659614 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-757d775c7-shl7t" Feb 16 11:28:54 crc kubenswrapper[4949]: I0216 11:28:54.738540 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-kslw5"] Feb 16 11:28:54 crc kubenswrapper[4949]: W0216 11:28:54.748900 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9cba7e57_5348_4b72_bf94_43da9a6a504d.slice/crio-88969d4d9c7d40b6e8b64b25a2e172d560714fe74140367390ee092af0acc553 WatchSource:0}: Error finding container 88969d4d9c7d40b6e8b64b25a2e172d560714fe74140367390ee092af0acc553: Status 404 returned error can't find the container with id 88969d4d9c7d40b6e8b64b25a2e172d560714fe74140367390ee092af0acc553 Feb 16 11:28:54 crc kubenswrapper[4949]: I0216 11:28:54.840445 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/c4ac199e-a5f1-4fc7-ad0f-ce907557928d-console-config\") pod \"c4ac199e-a5f1-4fc7-ad0f-ce907557928d\" (UID: \"c4ac199e-a5f1-4fc7-ad0f-ce907557928d\") " Feb 16 11:28:54 crc kubenswrapper[4949]: I0216 11:28:54.840989 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/c4ac199e-a5f1-4fc7-ad0f-ce907557928d-console-serving-cert\") pod \"c4ac199e-a5f1-4fc7-ad0f-ce907557928d\" (UID: \"c4ac199e-a5f1-4fc7-ad0f-ce907557928d\") " Feb 16 11:28:54 crc kubenswrapper[4949]: I0216 11:28:54.841023 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/c4ac199e-a5f1-4fc7-ad0f-ce907557928d-oauth-serving-cert\") pod \"c4ac199e-a5f1-4fc7-ad0f-ce907557928d\" (UID: \"c4ac199e-a5f1-4fc7-ad0f-ce907557928d\") " Feb 16 11:28:54 crc kubenswrapper[4949]: I0216 11:28:54.841071 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/c4ac199e-a5f1-4fc7-ad0f-ce907557928d-service-ca\") pod \"c4ac199e-a5f1-4fc7-ad0f-ce907557928d\" (UID: \"c4ac199e-a5f1-4fc7-ad0f-ce907557928d\") " Feb 16 11:28:54 crc kubenswrapper[4949]: I0216 11:28:54.841110 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/c4ac199e-a5f1-4fc7-ad0f-ce907557928d-console-oauth-config\") pod \"c4ac199e-a5f1-4fc7-ad0f-ce907557928d\" (UID: \"c4ac199e-a5f1-4fc7-ad0f-ce907557928d\") " Feb 16 11:28:54 crc kubenswrapper[4949]: I0216 11:28:54.841269 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c4ac199e-a5f1-4fc7-ad0f-ce907557928d-trusted-ca-bundle\") pod \"c4ac199e-a5f1-4fc7-ad0f-ce907557928d\" (UID: \"c4ac199e-a5f1-4fc7-ad0f-ce907557928d\") " Feb 16 11:28:54 crc kubenswrapper[4949]: I0216 11:28:54.841443 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bl47r\" (UniqueName: \"kubernetes.io/projected/c4ac199e-a5f1-4fc7-ad0f-ce907557928d-kube-api-access-bl47r\") pod \"c4ac199e-a5f1-4fc7-ad0f-ce907557928d\" (UID: \"c4ac199e-a5f1-4fc7-ad0f-ce907557928d\") " Feb 16 11:28:54 crc kubenswrapper[4949]: I0216 11:28:54.848500 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c4ac199e-a5f1-4fc7-ad0f-ce907557928d-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "c4ac199e-a5f1-4fc7-ad0f-ce907557928d" (UID: "c4ac199e-a5f1-4fc7-ad0f-ce907557928d"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:28:54 crc kubenswrapper[4949]: I0216 11:28:54.848544 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c4ac199e-a5f1-4fc7-ad0f-ce907557928d-service-ca" (OuterVolumeSpecName: "service-ca") pod "c4ac199e-a5f1-4fc7-ad0f-ce907557928d" (UID: "c4ac199e-a5f1-4fc7-ad0f-ce907557928d"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:28:54 crc kubenswrapper[4949]: I0216 11:28:54.849795 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c4ac199e-a5f1-4fc7-ad0f-ce907557928d-console-config" (OuterVolumeSpecName: "console-config") pod "c4ac199e-a5f1-4fc7-ad0f-ce907557928d" (UID: "c4ac199e-a5f1-4fc7-ad0f-ce907557928d"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:28:54 crc kubenswrapper[4949]: I0216 11:28:54.849979 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c4ac199e-a5f1-4fc7-ad0f-ce907557928d-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "c4ac199e-a5f1-4fc7-ad0f-ce907557928d" (UID: "c4ac199e-a5f1-4fc7-ad0f-ce907557928d"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:28:54 crc kubenswrapper[4949]: I0216 11:28:54.850145 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c4ac199e-a5f1-4fc7-ad0f-ce907557928d-kube-api-access-bl47r" (OuterVolumeSpecName: "kube-api-access-bl47r") pod "c4ac199e-a5f1-4fc7-ad0f-ce907557928d" (UID: "c4ac199e-a5f1-4fc7-ad0f-ce907557928d"). InnerVolumeSpecName "kube-api-access-bl47r". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:28:54 crc kubenswrapper[4949]: I0216 11:28:54.851291 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4ac199e-a5f1-4fc7-ad0f-ce907557928d-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "c4ac199e-a5f1-4fc7-ad0f-ce907557928d" (UID: "c4ac199e-a5f1-4fc7-ad0f-ce907557928d"). 
InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:28:54 crc kubenswrapper[4949]: I0216 11:28:54.852314 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4ac199e-a5f1-4fc7-ad0f-ce907557928d-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "c4ac199e-a5f1-4fc7-ad0f-ce907557928d" (UID: "c4ac199e-a5f1-4fc7-ad0f-ce907557928d"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:28:54 crc kubenswrapper[4949]: I0216 11:28:54.944423 4949 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c4ac199e-a5f1-4fc7-ad0f-ce907557928d-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:28:54 crc kubenswrapper[4949]: I0216 11:28:54.944457 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bl47r\" (UniqueName: \"kubernetes.io/projected/c4ac199e-a5f1-4fc7-ad0f-ce907557928d-kube-api-access-bl47r\") on node \"crc\" DevicePath \"\"" Feb 16 11:28:54 crc kubenswrapper[4949]: I0216 11:28:54.944470 4949 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/c4ac199e-a5f1-4fc7-ad0f-ce907557928d-console-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:28:54 crc kubenswrapper[4949]: I0216 11:28:54.944482 4949 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/c4ac199e-a5f1-4fc7-ad0f-ce907557928d-console-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 16 11:28:54 crc kubenswrapper[4949]: I0216 11:28:54.944495 4949 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/c4ac199e-a5f1-4fc7-ad0f-ce907557928d-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 16 11:28:54 crc kubenswrapper[4949]: I0216 11:28:54.944507 4949 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/c4ac199e-a5f1-4fc7-ad0f-ce907557928d-service-ca\") on node \"crc\" DevicePath \"\"" Feb 16 11:28:54 crc kubenswrapper[4949]: I0216 11:28:54.944516 4949 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/c4ac199e-a5f1-4fc7-ad0f-ce907557928d-console-oauth-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:28:55 crc kubenswrapper[4949]: I0216 11:28:55.056351 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"2b4e8478-eec0-499f-a824-b0f07355e4f6","Type":"ContainerStarted","Data":"184ebec4345da93f1c2a65db2c7a0090e4039d1d6b0ebf753debbf33a95841da"} Feb 16 11:28:55 crc kubenswrapper[4949]: I0216 11:28:55.063304 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-kslw5" event={"ID":"9cba7e57-5348-4b72-bf94-43da9a6a504d","Type":"ContainerStarted","Data":"f33caa37b2130d80ccd20ffb48d1c41c74fb07d9a68e196bd134751c2fcb1e00"} Feb 16 11:28:55 crc kubenswrapper[4949]: I0216 11:28:55.063353 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-kslw5" event={"ID":"9cba7e57-5348-4b72-bf94-43da9a6a504d","Type":"ContainerStarted","Data":"88969d4d9c7d40b6e8b64b25a2e172d560714fe74140367390ee092af0acc553"} Feb 16 11:28:55 crc kubenswrapper[4949]: I0216 11:28:55.067575 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/openstack-cell1-galera-0" event={"ID":"9a5b2f17-57bf-4aad-b18c-d1ec47f358c9","Type":"ContainerStarted","Data":"5f8ea1337baab88fe1d912b3f25196028379785b5d05c9d3a31df4cb974124b9"} Feb 16 11:28:55 crc kubenswrapper[4949]: I0216 11:28:55.074968 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-wkwb6" event={"ID":"1241685d-968e-4259-8d7e-f27c55e1fb99","Type":"ContainerDied","Data":"083a05a0f269c758953ec9c4ec64ea9ac4d951a16604a77925f11ba26dd87657"} Feb 16 11:28:55 crc kubenswrapper[4949]: I0216 11:28:55.075084 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-wkwb6" Feb 16 11:28:55 crc kubenswrapper[4949]: I0216 11:28:55.077701 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-757d775c7-shl7t_c4ac199e-a5f1-4fc7-ad0f-ce907557928d/console/0.log" Feb 16 11:28:55 crc kubenswrapper[4949]: I0216 11:28:55.078983 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-757d775c7-shl7t" Feb 16 11:28:55 crc kubenswrapper[4949]: I0216 11:28:55.080823 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-757d775c7-shl7t" event={"ID":"c4ac199e-a5f1-4fc7-ad0f-ce907557928d","Type":"ContainerDied","Data":"59bcb39fdc28f782ef304ed5482cfff2aedc5d962b7b92d6833ee76c87f8486c"} Feb 16 11:28:55 crc kubenswrapper[4949]: I0216 11:28:55.080895 4949 scope.go:117] "RemoveContainer" containerID="dc828a6055f2d9d222c312230d65386d41faf576c5f5ecfd9e85c06814e64733" Feb 16 11:28:55 crc kubenswrapper[4949]: I0216 11:28:55.180862 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Feb 16 11:28:55 crc kubenswrapper[4949]: I0216 11:28:55.180934 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Feb 16 11:28:55 crc kubenswrapper[4949]: I0216 11:28:55.481053 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-757d775c7-shl7t"] Feb 16 11:28:55 crc kubenswrapper[4949]: I0216 11:28:55.529628 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-757d775c7-shl7t"] Feb 16 11:28:55 crc kubenswrapper[4949]: I0216 11:28:55.603145 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-wkwb6"] Feb 16 11:28:55 crc kubenswrapper[4949]: I0216 11:28:55.632305 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-wkwb6"] Feb 16 11:28:55 crc kubenswrapper[4949]: I0216 11:28:55.655813 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Feb 16 11:28:55 crc kubenswrapper[4949]: E0216 11:28:55.656521 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4ac199e-a5f1-4fc7-ad0f-ce907557928d" containerName="console" Feb 16 11:28:55 crc kubenswrapper[4949]: I0216 11:28:55.656542 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4ac199e-a5f1-4fc7-ad0f-ce907557928d" containerName="console" Feb 16 11:28:55 crc kubenswrapper[4949]: I0216 11:28:55.656793 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="c4ac199e-a5f1-4fc7-ad0f-ce907557928d" containerName="console" Feb 16 11:28:55 crc kubenswrapper[4949]: I0216 11:28:55.658142 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Feb 16 11:28:55 crc kubenswrapper[4949]: I0216 11:28:55.661930 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-sxxnx" Feb 16 11:28:55 crc kubenswrapper[4949]: I0216 11:28:55.662214 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Feb 16 11:28:55 crc kubenswrapper[4949]: I0216 11:28:55.662257 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Feb 16 11:28:55 crc kubenswrapper[4949]: I0216 11:28:55.664080 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Feb 16 11:28:55 crc kubenswrapper[4949]: I0216 11:28:55.686898 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Feb 16 11:28:55 crc kubenswrapper[4949]: I0216 11:28:55.699900 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e580bb4f-88f7-4c69-8eb5-669fc9733593-config\") pod \"ovn-northd-0\" (UID: \"e580bb4f-88f7-4c69-8eb5-669fc9733593\") " pod="openstack/ovn-northd-0" Feb 16 11:28:55 crc kubenswrapper[4949]: I0216 11:28:55.700093 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/e580bb4f-88f7-4c69-8eb5-669fc9733593-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"e580bb4f-88f7-4c69-8eb5-669fc9733593\") " pod="openstack/ovn-northd-0" Feb 16 11:28:55 crc kubenswrapper[4949]: I0216 11:28:55.700230 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/e580bb4f-88f7-4c69-8eb5-669fc9733593-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"e580bb4f-88f7-4c69-8eb5-669fc9733593\") " pod="openstack/ovn-northd-0" Feb 16 11:28:55 crc kubenswrapper[4949]: I0216 11:28:55.700344 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e580bb4f-88f7-4c69-8eb5-669fc9733593-scripts\") pod \"ovn-northd-0\" (UID: \"e580bb4f-88f7-4c69-8eb5-669fc9733593\") " pod="openstack/ovn-northd-0" Feb 16 11:28:55 crc kubenswrapper[4949]: I0216 11:28:55.700442 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e580bb4f-88f7-4c69-8eb5-669fc9733593-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"e580bb4f-88f7-4c69-8eb5-669fc9733593\") " pod="openstack/ovn-northd-0" Feb 16 11:28:55 crc kubenswrapper[4949]: I0216 11:28:55.700505 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e580bb4f-88f7-4c69-8eb5-669fc9733593-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"e580bb4f-88f7-4c69-8eb5-669fc9733593\") " pod="openstack/ovn-northd-0" Feb 16 11:28:55 crc kubenswrapper[4949]: I0216 11:28:55.700577 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qdmxj\" (UniqueName: \"kubernetes.io/projected/e580bb4f-88f7-4c69-8eb5-669fc9733593-kube-api-access-qdmxj\") pod \"ovn-northd-0\" (UID: \"e580bb4f-88f7-4c69-8eb5-669fc9733593\") " pod="openstack/ovn-northd-0" Feb 16 11:28:55 crc kubenswrapper[4949]: 
I0216 11:28:55.802695 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e580bb4f-88f7-4c69-8eb5-669fc9733593-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"e580bb4f-88f7-4c69-8eb5-669fc9733593\") " pod="openstack/ovn-northd-0" Feb 16 11:28:55 crc kubenswrapper[4949]: I0216 11:28:55.803093 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e580bb4f-88f7-4c69-8eb5-669fc9733593-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"e580bb4f-88f7-4c69-8eb5-669fc9733593\") " pod="openstack/ovn-northd-0" Feb 16 11:28:55 crc kubenswrapper[4949]: I0216 11:28:55.803127 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qdmxj\" (UniqueName: \"kubernetes.io/projected/e580bb4f-88f7-4c69-8eb5-669fc9733593-kube-api-access-qdmxj\") pod \"ovn-northd-0\" (UID: \"e580bb4f-88f7-4c69-8eb5-669fc9733593\") " pod="openstack/ovn-northd-0" Feb 16 11:28:55 crc kubenswrapper[4949]: I0216 11:28:55.803199 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e580bb4f-88f7-4c69-8eb5-669fc9733593-config\") pod \"ovn-northd-0\" (UID: \"e580bb4f-88f7-4c69-8eb5-669fc9733593\") " pod="openstack/ovn-northd-0" Feb 16 11:28:55 crc kubenswrapper[4949]: I0216 11:28:55.803271 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/e580bb4f-88f7-4c69-8eb5-669fc9733593-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"e580bb4f-88f7-4c69-8eb5-669fc9733593\") " pod="openstack/ovn-northd-0" Feb 16 11:28:55 crc kubenswrapper[4949]: I0216 11:28:55.803322 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/e580bb4f-88f7-4c69-8eb5-669fc9733593-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"e580bb4f-88f7-4c69-8eb5-669fc9733593\") " pod="openstack/ovn-northd-0" Feb 16 11:28:55 crc kubenswrapper[4949]: I0216 11:28:55.803371 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e580bb4f-88f7-4c69-8eb5-669fc9733593-scripts\") pod \"ovn-northd-0\" (UID: \"e580bb4f-88f7-4c69-8eb5-669fc9733593\") " pod="openstack/ovn-northd-0" Feb 16 11:28:55 crc kubenswrapper[4949]: I0216 11:28:55.804324 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e580bb4f-88f7-4c69-8eb5-669fc9733593-scripts\") pod \"ovn-northd-0\" (UID: \"e580bb4f-88f7-4c69-8eb5-669fc9733593\") " pod="openstack/ovn-northd-0" Feb 16 11:28:55 crc kubenswrapper[4949]: I0216 11:28:55.804421 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e580bb4f-88f7-4c69-8eb5-669fc9733593-config\") pod \"ovn-northd-0\" (UID: \"e580bb4f-88f7-4c69-8eb5-669fc9733593\") " pod="openstack/ovn-northd-0" Feb 16 11:28:55 crc kubenswrapper[4949]: I0216 11:28:55.804716 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/e580bb4f-88f7-4c69-8eb5-669fc9733593-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"e580bb4f-88f7-4c69-8eb5-669fc9733593\") " pod="openstack/ovn-northd-0" Feb 16 11:28:55 crc kubenswrapper[4949]: I0216 11:28:55.812869 4949 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e580bb4f-88f7-4c69-8eb5-669fc9733593-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"e580bb4f-88f7-4c69-8eb5-669fc9733593\") " pod="openstack/ovn-northd-0" Feb 16 11:28:55 crc kubenswrapper[4949]: I0216 11:28:55.813854 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/e580bb4f-88f7-4c69-8eb5-669fc9733593-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"e580bb4f-88f7-4c69-8eb5-669fc9733593\") " pod="openstack/ovn-northd-0" Feb 16 11:28:55 crc kubenswrapper[4949]: I0216 11:28:55.814628 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e580bb4f-88f7-4c69-8eb5-669fc9733593-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"e580bb4f-88f7-4c69-8eb5-669fc9733593\") " pod="openstack/ovn-northd-0" Feb 16 11:28:55 crc kubenswrapper[4949]: I0216 11:28:55.825210 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qdmxj\" (UniqueName: \"kubernetes.io/projected/e580bb4f-88f7-4c69-8eb5-669fc9733593-kube-api-access-qdmxj\") pod \"ovn-northd-0\" (UID: \"e580bb4f-88f7-4c69-8eb5-669fc9733593\") " pod="openstack/ovn-northd-0" Feb 16 11:28:55 crc kubenswrapper[4949]: I0216 11:28:55.987814 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Feb 16 11:28:56 crc kubenswrapper[4949]: I0216 11:28:56.091292 4949 generic.go:334] "Generic (PLEG): container finished" podID="9cba7e57-5348-4b72-bf94-43da9a6a504d" containerID="f33caa37b2130d80ccd20ffb48d1c41c74fb07d9a68e196bd134751c2fcb1e00" exitCode=0 Feb 16 11:28:56 crc kubenswrapper[4949]: I0216 11:28:56.092699 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-kslw5" event={"ID":"9cba7e57-5348-4b72-bf94-43da9a6a504d","Type":"ContainerDied","Data":"f33caa37b2130d80ccd20ffb48d1c41c74fb07d9a68e196bd134751c2fcb1e00"} Feb 16 11:28:56 crc kubenswrapper[4949]: I0216 11:28:56.092744 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-86db49b7ff-kslw5" Feb 16 11:28:56 crc kubenswrapper[4949]: I0216 11:28:56.092762 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-kslw5" event={"ID":"9cba7e57-5348-4b72-bf94-43da9a6a504d","Type":"ContainerStarted","Data":"cd2623268180de8b4383b823158db3c698c76e8cfb0120e6e2ea8cd841746ae4"} Feb 16 11:28:56 crc kubenswrapper[4949]: I0216 11:28:56.113970 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-86db49b7ff-kslw5" podStartSLOduration=3.11394908 podStartE2EDuration="3.11394908s" podCreationTimestamp="2026-02-16 11:28:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:28:56.113528198 +0000 UTC m=+1325.742862383" watchObservedRunningTime="2026-02-16 11:28:56.11394908 +0000 UTC m=+1325.743283245" Feb 16 11:28:56 crc kubenswrapper[4949]: I0216 11:28:56.624762 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Feb 16 11:28:57 crc kubenswrapper[4949]: I0216 11:28:57.104919 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" 
event={"ID":"03acf817-d9f3-4c65-b4c0-920136bc3d7b","Type":"ContainerStarted","Data":"fce07100897708b28b21aa8caa52cb2cdd0a3fd58e136b256e71ff3258543658"} Feb 16 11:28:57 crc kubenswrapper[4949]: I0216 11:28:57.105793 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Feb 16 11:28:57 crc kubenswrapper[4949]: I0216 11:28:57.107531 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"e580bb4f-88f7-4c69-8eb5-669fc9733593","Type":"ContainerStarted","Data":"7d8c5f44c1375d228c04175ee962aced45fbf4a51e99f224e151c622e158c044"} Feb 16 11:28:57 crc kubenswrapper[4949]: I0216 11:28:57.141446 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=3.190493806 podStartE2EDuration="44.141421725s" podCreationTimestamp="2026-02-16 11:28:13 +0000 UTC" firstStartedPulling="2026-02-16 11:28:14.7916796 +0000 UTC m=+1284.421013755" lastFinishedPulling="2026-02-16 11:28:55.742607509 +0000 UTC m=+1325.371941674" observedRunningTime="2026-02-16 11:28:57.128922979 +0000 UTC m=+1326.758257144" watchObservedRunningTime="2026-02-16 11:28:57.141421725 +0000 UTC m=+1326.770755880" Feb 16 11:28:57 crc kubenswrapper[4949]: I0216 11:28:57.260338 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1241685d-968e-4259-8d7e-f27c55e1fb99" path="/var/lib/kubelet/pods/1241685d-968e-4259-8d7e-f27c55e1fb99/volumes" Feb 16 11:28:57 crc kubenswrapper[4949]: I0216 11:28:57.261443 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c4ac199e-a5f1-4fc7-ad0f-ce907557928d" path="/var/lib/kubelet/pods/c4ac199e-a5f1-4fc7-ad0f-ce907557928d/volumes" Feb 16 11:28:57 crc kubenswrapper[4949]: I0216 11:28:57.574499 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7fd796d7df-w9phd" Feb 16 11:28:58 crc kubenswrapper[4949]: I0216 11:28:58.119507 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"f8511a9d-0c08-43c9-9243-f340d75fabe1","Type":"ContainerStarted","Data":"eb64d9c169e20c3d0eb709f7c3b0091da53a3cfe1487cc6df9cb64b296e30907"} Feb 16 11:28:58 crc kubenswrapper[4949]: I0216 11:28:58.121271 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"0f715146-edc4-4f1f-80e3-f134d9833f47","Type":"ContainerStarted","Data":"f75d823174942d793b39e3e51f5a70d2c313e2e41e562884d6fd8299f71d272c"} Feb 16 11:28:58 crc kubenswrapper[4949]: I0216 11:28:58.123773 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"2fd90353-44d0-4269-84cc-f90c10eb6da4","Type":"ContainerStarted","Data":"73fe0cd43b3472a86306abd4f0a156cdbb5d3c1027b363760eaa3d0147f983b9"} Feb 16 11:28:59 crc kubenswrapper[4949]: I0216 11:28:59.132987 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a","Type":"ContainerStarted","Data":"28dbd9a3450bc4c9940c72d847e3a36ef6fd5140215944f266e71e0a01581677"} Feb 16 11:28:59 crc kubenswrapper[4949]: I0216 11:28:59.135848 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"e580bb4f-88f7-4c69-8eb5-669fc9733593","Type":"ContainerStarted","Data":"c8866960dba08c4de785aa5efe2db4516de0a573f9c6c3a604b8f41fb73c882e"} Feb 16 11:28:59 crc kubenswrapper[4949]: I0216 11:28:59.135896 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ovn-northd-0" event={"ID":"e580bb4f-88f7-4c69-8eb5-669fc9733593","Type":"ContainerStarted","Data":"3feac3ddd846d240e523968675591da05f54390e61b651d8a791bdd377944613"} Feb 16 11:28:59 crc kubenswrapper[4949]: I0216 11:28:59.136010 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Feb 16 11:28:59 crc kubenswrapper[4949]: I0216 11:28:59.247402 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=2.730138605 podStartE2EDuration="4.247371448s" podCreationTimestamp="2026-02-16 11:28:55 +0000 UTC" firstStartedPulling="2026-02-16 11:28:56.633247341 +0000 UTC m=+1326.262581506" lastFinishedPulling="2026-02-16 11:28:58.150480184 +0000 UTC m=+1327.779814349" observedRunningTime="2026-02-16 11:28:59.226915895 +0000 UTC m=+1328.856250070" watchObservedRunningTime="2026-02-16 11:28:59.247371448 +0000 UTC m=+1328.876705613" Feb 16 11:29:00 crc kubenswrapper[4949]: I0216 11:29:00.149468 4949 generic.go:334] "Generic (PLEG): container finished" podID="9a5b2f17-57bf-4aad-b18c-d1ec47f358c9" containerID="5f8ea1337baab88fe1d912b3f25196028379785b5d05c9d3a31df4cb974124b9" exitCode=0 Feb 16 11:29:00 crc kubenswrapper[4949]: I0216 11:29:00.149535 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"9a5b2f17-57bf-4aad-b18c-d1ec47f358c9","Type":"ContainerDied","Data":"5f8ea1337baab88fe1d912b3f25196028379785b5d05c9d3a31df4cb974124b9"} Feb 16 11:29:01 crc kubenswrapper[4949]: I0216 11:29:01.177924 4949 generic.go:334] "Generic (PLEG): container finished" podID="f8511a9d-0c08-43c9-9243-f340d75fabe1" containerID="eb64d9c169e20c3d0eb709f7c3b0091da53a3cfe1487cc6df9cb64b296e30907" exitCode=0 Feb 16 11:29:01 crc kubenswrapper[4949]: I0216 11:29:01.178032 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"f8511a9d-0c08-43c9-9243-f340d75fabe1","Type":"ContainerDied","Data":"eb64d9c169e20c3d0eb709f7c3b0091da53a3cfe1487cc6df9cb64b296e30907"} Feb 16 11:29:01 crc kubenswrapper[4949]: I0216 11:29:01.191257 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"f89f3c3c-f6cb-4d3e-8950-fe35b2bfcc41","Type":"ContainerStarted","Data":"04cbb55a710e1af3150d6facd8083abe6253ef0aea8c224eef19eef55a9ab0ed"} Feb 16 11:29:01 crc kubenswrapper[4949]: I0216 11:29:01.191921 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Feb 16 11:29:01 crc kubenswrapper[4949]: I0216 11:29:01.195141 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"9a5b2f17-57bf-4aad-b18c-d1ec47f358c9","Type":"ContainerStarted","Data":"40d914317b61f579ecec7a903d0e35a8629cc83dada2788c8cab7daf11f5fc9f"} Feb 16 11:29:01 crc kubenswrapper[4949]: I0216 11:29:01.261972 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=8.866831026 podStartE2EDuration="49.261950986s" podCreationTimestamp="2026-02-16 11:28:12 +0000 UTC" firstStartedPulling="2026-02-16 11:28:14.28269437 +0000 UTC m=+1283.912028535" lastFinishedPulling="2026-02-16 11:28:54.67781433 +0000 UTC m=+1324.307148495" observedRunningTime="2026-02-16 11:29:01.229072968 +0000 UTC m=+1330.858407133" watchObservedRunningTime="2026-02-16 11:29:01.261950986 +0000 UTC m=+1330.891285151" Feb 16 11:29:01 crc kubenswrapper[4949]: I0216 11:29:01.270930 4949 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=3.48695093 podStartE2EDuration="45.270912001s" podCreationTimestamp="2026-02-16 11:28:16 +0000 UTC" firstStartedPulling="2026-02-16 11:28:18.318333741 +0000 UTC m=+1287.947667896" lastFinishedPulling="2026-02-16 11:29:00.102294812 +0000 UTC m=+1329.731628967" observedRunningTime="2026-02-16 11:29:01.246706501 +0000 UTC m=+1330.876040666" watchObservedRunningTime="2026-02-16 11:29:01.270912001 +0000 UTC m=+1330.900246166" Feb 16 11:29:02 crc kubenswrapper[4949]: I0216 11:29:02.206941 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"f8511a9d-0c08-43c9-9243-f340d75fabe1","Type":"ContainerStarted","Data":"7b261dbe81180d5ef758dcf4aa45dc49747b17eeceb70cc4c9916d97c9843686"} Feb 16 11:29:02 crc kubenswrapper[4949]: I0216 11:29:02.236959 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=-9223371984.617842 podStartE2EDuration="52.236933483s" podCreationTimestamp="2026-02-16 11:28:10 +0000 UTC" firstStartedPulling="2026-02-16 11:28:13.449618406 +0000 UTC m=+1283.078952571" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:29:02.230300074 +0000 UTC m=+1331.859634239" watchObservedRunningTime="2026-02-16 11:29:02.236933483 +0000 UTC m=+1331.866267658" Feb 16 11:29:02 crc kubenswrapper[4949]: I0216 11:29:02.273222 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Feb 16 11:29:02 crc kubenswrapper[4949]: I0216 11:29:02.273265 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Feb 16 11:29:03 crc kubenswrapper[4949]: I0216 11:29:03.499943 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Feb 16 11:29:03 crc kubenswrapper[4949]: I0216 11:29:03.505309 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Feb 16 11:29:03 crc kubenswrapper[4949]: I0216 11:29:03.983457 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-86db49b7ff-kslw5" Feb 16 11:29:04 crc kubenswrapper[4949]: I0216 11:29:04.030443 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Feb 16 11:29:04 crc kubenswrapper[4949]: I0216 11:29:04.053357 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-w9phd"] Feb 16 11:29:04 crc kubenswrapper[4949]: I0216 11:29:04.053668 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7fd796d7df-w9phd" podUID="d4a2be70-f0e5-4f39-8b5e-744b868a47ad" containerName="dnsmasq-dns" containerID="cri-o://2148a9357911a282d7199e1a05531344f0da39d0dc0cfa73bb2586a3b1c21e7e" gracePeriod=10 Feb 16 11:29:05 crc kubenswrapper[4949]: I0216 11:29:05.251927 4949 generic.go:334] "Generic (PLEG): container finished" podID="d4a2be70-f0e5-4f39-8b5e-744b868a47ad" containerID="2148a9357911a282d7199e1a05531344f0da39d0dc0cfa73bb2586a3b1c21e7e" exitCode=0 Feb 16 11:29:05 crc kubenswrapper[4949]: I0216 11:29:05.251999 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-w9phd" 
event={"ID":"d4a2be70-f0e5-4f39-8b5e-744b868a47ad","Type":"ContainerDied","Data":"2148a9357911a282d7199e1a05531344f0da39d0dc0cfa73bb2586a3b1c21e7e"} Feb 16 11:29:06 crc kubenswrapper[4949]: I0216 11:29:06.502007 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-698758b865-2q49p"] Feb 16 11:29:06 crc kubenswrapper[4949]: I0216 11:29:06.504326 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-2q49p" Feb 16 11:29:06 crc kubenswrapper[4949]: I0216 11:29:06.519563 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-698758b865-2q49p"] Feb 16 11:29:06 crc kubenswrapper[4949]: I0216 11:29:06.570495 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Feb 16 11:29:06 crc kubenswrapper[4949]: I0216 11:29:06.612466 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e03032a6-a757-4270-bd4f-b84532ffcb4b-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-2q49p\" (UID: \"e03032a6-a757-4270-bd4f-b84532ffcb4b\") " pod="openstack/dnsmasq-dns-698758b865-2q49p" Feb 16 11:29:06 crc kubenswrapper[4949]: I0216 11:29:06.612617 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e03032a6-a757-4270-bd4f-b84532ffcb4b-config\") pod \"dnsmasq-dns-698758b865-2q49p\" (UID: \"e03032a6-a757-4270-bd4f-b84532ffcb4b\") " pod="openstack/dnsmasq-dns-698758b865-2q49p" Feb 16 11:29:06 crc kubenswrapper[4949]: I0216 11:29:06.612676 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e03032a6-a757-4270-bd4f-b84532ffcb4b-dns-svc\") pod \"dnsmasq-dns-698758b865-2q49p\" (UID: \"e03032a6-a757-4270-bd4f-b84532ffcb4b\") " pod="openstack/dnsmasq-dns-698758b865-2q49p" Feb 16 11:29:06 crc kubenswrapper[4949]: I0216 11:29:06.612718 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x5xnm\" (UniqueName: \"kubernetes.io/projected/e03032a6-a757-4270-bd4f-b84532ffcb4b-kube-api-access-x5xnm\") pod \"dnsmasq-dns-698758b865-2q49p\" (UID: \"e03032a6-a757-4270-bd4f-b84532ffcb4b\") " pod="openstack/dnsmasq-dns-698758b865-2q49p" Feb 16 11:29:06 crc kubenswrapper[4949]: I0216 11:29:06.612764 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e03032a6-a757-4270-bd4f-b84532ffcb4b-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-2q49p\" (UID: \"e03032a6-a757-4270-bd4f-b84532ffcb4b\") " pod="openstack/dnsmasq-dns-698758b865-2q49p" Feb 16 11:29:06 crc kubenswrapper[4949]: I0216 11:29:06.715097 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e03032a6-a757-4270-bd4f-b84532ffcb4b-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-2q49p\" (UID: \"e03032a6-a757-4270-bd4f-b84532ffcb4b\") " pod="openstack/dnsmasq-dns-698758b865-2q49p" Feb 16 11:29:06 crc kubenswrapper[4949]: I0216 11:29:06.715259 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e03032a6-a757-4270-bd4f-b84532ffcb4b-config\") pod \"dnsmasq-dns-698758b865-2q49p\" (UID: 
\"e03032a6-a757-4270-bd4f-b84532ffcb4b\") " pod="openstack/dnsmasq-dns-698758b865-2q49p" Feb 16 11:29:06 crc kubenswrapper[4949]: I0216 11:29:06.715318 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e03032a6-a757-4270-bd4f-b84532ffcb4b-dns-svc\") pod \"dnsmasq-dns-698758b865-2q49p\" (UID: \"e03032a6-a757-4270-bd4f-b84532ffcb4b\") " pod="openstack/dnsmasq-dns-698758b865-2q49p" Feb 16 11:29:06 crc kubenswrapper[4949]: I0216 11:29:06.715346 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x5xnm\" (UniqueName: \"kubernetes.io/projected/e03032a6-a757-4270-bd4f-b84532ffcb4b-kube-api-access-x5xnm\") pod \"dnsmasq-dns-698758b865-2q49p\" (UID: \"e03032a6-a757-4270-bd4f-b84532ffcb4b\") " pod="openstack/dnsmasq-dns-698758b865-2q49p" Feb 16 11:29:06 crc kubenswrapper[4949]: I0216 11:29:06.715405 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e03032a6-a757-4270-bd4f-b84532ffcb4b-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-2q49p\" (UID: \"e03032a6-a757-4270-bd4f-b84532ffcb4b\") " pod="openstack/dnsmasq-dns-698758b865-2q49p" Feb 16 11:29:06 crc kubenswrapper[4949]: I0216 11:29:06.716509 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e03032a6-a757-4270-bd4f-b84532ffcb4b-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-2q49p\" (UID: \"e03032a6-a757-4270-bd4f-b84532ffcb4b\") " pod="openstack/dnsmasq-dns-698758b865-2q49p" Feb 16 11:29:06 crc kubenswrapper[4949]: I0216 11:29:06.716594 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e03032a6-a757-4270-bd4f-b84532ffcb4b-config\") pod \"dnsmasq-dns-698758b865-2q49p\" (UID: \"e03032a6-a757-4270-bd4f-b84532ffcb4b\") " pod="openstack/dnsmasq-dns-698758b865-2q49p" Feb 16 11:29:06 crc kubenswrapper[4949]: I0216 11:29:06.716766 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e03032a6-a757-4270-bd4f-b84532ffcb4b-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-2q49p\" (UID: \"e03032a6-a757-4270-bd4f-b84532ffcb4b\") " pod="openstack/dnsmasq-dns-698758b865-2q49p" Feb 16 11:29:06 crc kubenswrapper[4949]: I0216 11:29:06.717557 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e03032a6-a757-4270-bd4f-b84532ffcb4b-dns-svc\") pod \"dnsmasq-dns-698758b865-2q49p\" (UID: \"e03032a6-a757-4270-bd4f-b84532ffcb4b\") " pod="openstack/dnsmasq-dns-698758b865-2q49p" Feb 16 11:29:06 crc kubenswrapper[4949]: I0216 11:29:06.742504 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x5xnm\" (UniqueName: \"kubernetes.io/projected/e03032a6-a757-4270-bd4f-b84532ffcb4b-kube-api-access-x5xnm\") pod \"dnsmasq-dns-698758b865-2q49p\" (UID: \"e03032a6-a757-4270-bd4f-b84532ffcb4b\") " pod="openstack/dnsmasq-dns-698758b865-2q49p" Feb 16 11:29:06 crc kubenswrapper[4949]: I0216 11:29:06.840380 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-2q49p" Feb 16 11:29:07 crc kubenswrapper[4949]: I0216 11:29:07.407373 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-698758b865-2q49p"] Feb 16 11:29:07 crc kubenswrapper[4949]: W0216 11:29:07.408006 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode03032a6_a757_4270_bd4f_b84532ffcb4b.slice/crio-f271253b6b3cbd166a0bb136ee89ef27a7ab04515cbab3a4dd0f8de6dedfe9df WatchSource:0}: Error finding container f271253b6b3cbd166a0bb136ee89ef27a7ab04515cbab3a4dd0f8de6dedfe9df: Status 404 returned error can't find the container with id f271253b6b3cbd166a0bb136ee89ef27a7ab04515cbab3a4dd0f8de6dedfe9df Feb 16 11:29:07 crc kubenswrapper[4949]: I0216 11:29:07.576408 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-7fd796d7df-w9phd" podUID="d4a2be70-f0e5-4f39-8b5e-744b868a47ad" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.143:5353: connect: connection refused" Feb 16 11:29:07 crc kubenswrapper[4949]: I0216 11:29:07.684947 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Feb 16 11:29:07 crc kubenswrapper[4949]: I0216 11:29:07.695501 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Feb 16 11:29:07 crc kubenswrapper[4949]: I0216 11:29:07.697115 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Feb 16 11:29:07 crc kubenswrapper[4949]: I0216 11:29:07.698562 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Feb 16 11:29:07 crc kubenswrapper[4949]: I0216 11:29:07.698789 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-qxpfw" Feb 16 11:29:07 crc kubenswrapper[4949]: I0216 11:29:07.698935 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Feb 16 11:29:07 crc kubenswrapper[4949]: I0216 11:29:07.699322 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Feb 16 11:29:07 crc kubenswrapper[4949]: I0216 11:29:07.858772 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/b8fafaac-cbaa-4726-91b8-b0739034455f-cache\") pod \"swift-storage-0\" (UID: \"b8fafaac-cbaa-4726-91b8-b0739034455f\") " pod="openstack/swift-storage-0" Feb 16 11:29:07 crc kubenswrapper[4949]: I0216 11:29:07.858873 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/b8fafaac-cbaa-4726-91b8-b0739034455f-lock\") pod \"swift-storage-0\" (UID: \"b8fafaac-cbaa-4726-91b8-b0739034455f\") " pod="openstack/swift-storage-0" Feb 16 11:29:07 crc kubenswrapper[4949]: I0216 11:29:07.858962 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b8fafaac-cbaa-4726-91b8-b0739034455f-etc-swift\") pod \"swift-storage-0\" (UID: \"b8fafaac-cbaa-4726-91b8-b0739034455f\") " pod="openstack/swift-storage-0" Feb 16 11:29:07 crc kubenswrapper[4949]: I0216 11:29:07.858997 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/b8fafaac-cbaa-4726-91b8-b0739034455f-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"b8fafaac-cbaa-4726-91b8-b0739034455f\") " pod="openstack/swift-storage-0" Feb 16 11:29:07 crc kubenswrapper[4949]: I0216 11:29:07.859046 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-eec050c7-286a-4bac-8354-3c08ccb33204\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-eec050c7-286a-4bac-8354-3c08ccb33204\") pod \"swift-storage-0\" (UID: \"b8fafaac-cbaa-4726-91b8-b0739034455f\") " pod="openstack/swift-storage-0" Feb 16 11:29:07 crc kubenswrapper[4949]: I0216 11:29:07.859080 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5fwcc\" (UniqueName: \"kubernetes.io/projected/b8fafaac-cbaa-4726-91b8-b0739034455f-kube-api-access-5fwcc\") pod \"swift-storage-0\" (UID: \"b8fafaac-cbaa-4726-91b8-b0739034455f\") " pod="openstack/swift-storage-0" Feb 16 11:29:07 crc kubenswrapper[4949]: I0216 11:29:07.970863 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b8fafaac-cbaa-4726-91b8-b0739034455f-etc-swift\") pod \"swift-storage-0\" (UID: \"b8fafaac-cbaa-4726-91b8-b0739034455f\") " pod="openstack/swift-storage-0" Feb 16 11:29:07 crc kubenswrapper[4949]: I0216 11:29:07.970959 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8fafaac-cbaa-4726-91b8-b0739034455f-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"b8fafaac-cbaa-4726-91b8-b0739034455f\") " pod="openstack/swift-storage-0" Feb 16 11:29:07 crc kubenswrapper[4949]: I0216 11:29:07.971008 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-eec050c7-286a-4bac-8354-3c08ccb33204\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-eec050c7-286a-4bac-8354-3c08ccb33204\") pod \"swift-storage-0\" (UID: \"b8fafaac-cbaa-4726-91b8-b0739034455f\") " pod="openstack/swift-storage-0" Feb 16 11:29:07 crc kubenswrapper[4949]: I0216 11:29:07.971050 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5fwcc\" (UniqueName: \"kubernetes.io/projected/b8fafaac-cbaa-4726-91b8-b0739034455f-kube-api-access-5fwcc\") pod \"swift-storage-0\" (UID: \"b8fafaac-cbaa-4726-91b8-b0739034455f\") " pod="openstack/swift-storage-0" Feb 16 11:29:07 crc kubenswrapper[4949]: I0216 11:29:07.971254 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/b8fafaac-cbaa-4726-91b8-b0739034455f-cache\") pod \"swift-storage-0\" (UID: \"b8fafaac-cbaa-4726-91b8-b0739034455f\") " pod="openstack/swift-storage-0" Feb 16 11:29:07 crc kubenswrapper[4949]: I0216 11:29:07.971449 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/b8fafaac-cbaa-4726-91b8-b0739034455f-lock\") pod \"swift-storage-0\" (UID: \"b8fafaac-cbaa-4726-91b8-b0739034455f\") " pod="openstack/swift-storage-0" Feb 16 11:29:07 crc kubenswrapper[4949]: I0216 11:29:07.972139 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/b8fafaac-cbaa-4726-91b8-b0739034455f-lock\") pod \"swift-storage-0\" (UID: \"b8fafaac-cbaa-4726-91b8-b0739034455f\") " pod="openstack/swift-storage-0" Feb 16 11:29:07 crc 
kubenswrapper[4949]: E0216 11:29:07.977610 4949 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Feb 16 11:29:07 crc kubenswrapper[4949]: E0216 11:29:07.977647 4949 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Feb 16 11:29:07 crc kubenswrapper[4949]: E0216 11:29:07.977717 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/b8fafaac-cbaa-4726-91b8-b0739034455f-etc-swift podName:b8fafaac-cbaa-4726-91b8-b0739034455f nodeName:}" failed. No retries permitted until 2026-02-16 11:29:08.477685047 +0000 UTC m=+1338.107019212 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/b8fafaac-cbaa-4726-91b8-b0739034455f-etc-swift") pod "swift-storage-0" (UID: "b8fafaac-cbaa-4726-91b8-b0739034455f") : configmap "swift-ring-files" not found Feb 16 11:29:07 crc kubenswrapper[4949]: I0216 11:29:07.979059 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/b8fafaac-cbaa-4726-91b8-b0739034455f-cache\") pod \"swift-storage-0\" (UID: \"b8fafaac-cbaa-4726-91b8-b0739034455f\") " pod="openstack/swift-storage-0" Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.010775 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8fafaac-cbaa-4726-91b8-b0739034455f-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"b8fafaac-cbaa-4726-91b8-b0739034455f\") " pod="openstack/swift-storage-0" Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.034560 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5fwcc\" (UniqueName: \"kubernetes.io/projected/b8fafaac-cbaa-4726-91b8-b0739034455f-kube-api-access-5fwcc\") pod \"swift-storage-0\" (UID: \"b8fafaac-cbaa-4726-91b8-b0739034455f\") " pod="openstack/swift-storage-0" Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.130413 4949 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.130462 4949 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-eec050c7-286a-4bac-8354-3c08ccb33204\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-eec050c7-286a-4bac-8354-3c08ccb33204\") pod \"swift-storage-0\" (UID: \"b8fafaac-cbaa-4726-91b8-b0739034455f\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/12aeb3bd7216d109e26c3111f4970c2a0140e86215cfd870e6faafd2be6ed613/globalmount\"" pod="openstack/swift-storage-0" Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.165023 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-w9phd" Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.273368 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-nvcsr"] Feb 16 11:29:08 crc kubenswrapper[4949]: E0216 11:29:08.273981 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4a2be70-f0e5-4f39-8b5e-744b868a47ad" containerName="init" Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.274006 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4a2be70-f0e5-4f39-8b5e-744b868a47ad" containerName="init" Feb 16 11:29:08 crc kubenswrapper[4949]: E0216 11:29:08.274031 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4a2be70-f0e5-4f39-8b5e-744b868a47ad" containerName="dnsmasq-dns" Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.274038 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4a2be70-f0e5-4f39-8b5e-744b868a47ad" containerName="dnsmasq-dns" Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.274277 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="d4a2be70-f0e5-4f39-8b5e-744b868a47ad" containerName="dnsmasq-dns" Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.275102 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-nvcsr" Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.281510 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.285632 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.285955 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.287553 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-59f89\" (UniqueName: \"kubernetes.io/projected/d4a2be70-f0e5-4f39-8b5e-744b868a47ad-kube-api-access-59f89\") pod \"d4a2be70-f0e5-4f39-8b5e-744b868a47ad\" (UID: \"d4a2be70-f0e5-4f39-8b5e-744b868a47ad\") " Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.287721 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d4a2be70-f0e5-4f39-8b5e-744b868a47ad-dns-svc\") pod \"d4a2be70-f0e5-4f39-8b5e-744b868a47ad\" (UID: \"d4a2be70-f0e5-4f39-8b5e-744b868a47ad\") " Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.288039 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d4a2be70-f0e5-4f39-8b5e-744b868a47ad-ovsdbserver-nb\") pod \"d4a2be70-f0e5-4f39-8b5e-744b868a47ad\" (UID: \"d4a2be70-f0e5-4f39-8b5e-744b868a47ad\") " Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.288322 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4a2be70-f0e5-4f39-8b5e-744b868a47ad-config\") pod \"d4a2be70-f0e5-4f39-8b5e-744b868a47ad\" (UID: \"d4a2be70-f0e5-4f39-8b5e-744b868a47ad\") " Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.309064 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" 
event={"ID":"9dd62db2-4af9-482c-b9ad-34021e59dae8","Type":"ContainerStarted","Data":"8a744026cd1e3a9a8a75a94fced472e587930e54ef454c1938f4129de226cd1c"} Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.312620 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-nvcsr"] Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.324534 4949 generic.go:334] "Generic (PLEG): container finished" podID="e03032a6-a757-4270-bd4f-b84532ffcb4b" containerID="673c05b826e22c4a9c97f9ffe8b8188ffc64e0296990a49e1581aa341b58fd4b" exitCode=0 Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.324623 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-2q49p" event={"ID":"e03032a6-a757-4270-bd4f-b84532ffcb4b","Type":"ContainerDied","Data":"673c05b826e22c4a9c97f9ffe8b8188ffc64e0296990a49e1581aa341b58fd4b"} Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.324668 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-2q49p" event={"ID":"e03032a6-a757-4270-bd4f-b84532ffcb4b","Type":"ContainerStarted","Data":"f271253b6b3cbd166a0bb136ee89ef27a7ab04515cbab3a4dd0f8de6dedfe9df"} Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.324685 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d4a2be70-f0e5-4f39-8b5e-744b868a47ad-kube-api-access-59f89" (OuterVolumeSpecName: "kube-api-access-59f89") pod "d4a2be70-f0e5-4f39-8b5e-744b868a47ad" (UID: "d4a2be70-f0e5-4f39-8b5e-744b868a47ad"). InnerVolumeSpecName "kube-api-access-59f89". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.351032 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-w9phd" event={"ID":"d4a2be70-f0e5-4f39-8b5e-744b868a47ad","Type":"ContainerDied","Data":"0cd2aae14337491d73d6386794a2cad25c3db227d7301e3f341d8be48279d120"} Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.351312 4949 scope.go:117] "RemoveContainer" containerID="2148a9357911a282d7199e1a05531344f0da39d0dc0cfa73bb2586a3b1c21e7e" Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.352010 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-w9phd" Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.400027 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-69hxz\" (UniqueName: \"kubernetes.io/projected/3aa21f38-ab8c-47b9-9ef0-f879e28eb01f-kube-api-access-69hxz\") pod \"swift-ring-rebalance-nvcsr\" (UID: \"3aa21f38-ab8c-47b9-9ef0-f879e28eb01f\") " pod="openstack/swift-ring-rebalance-nvcsr" Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.400147 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/3aa21f38-ab8c-47b9-9ef0-f879e28eb01f-ring-data-devices\") pod \"swift-ring-rebalance-nvcsr\" (UID: \"3aa21f38-ab8c-47b9-9ef0-f879e28eb01f\") " pod="openstack/swift-ring-rebalance-nvcsr" Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.400164 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3aa21f38-ab8c-47b9-9ef0-f879e28eb01f-combined-ca-bundle\") pod \"swift-ring-rebalance-nvcsr\" (UID: \"3aa21f38-ab8c-47b9-9ef0-f879e28eb01f\") " pod="openstack/swift-ring-rebalance-nvcsr" Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.400223 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/3aa21f38-ab8c-47b9-9ef0-f879e28eb01f-dispersionconf\") pod \"swift-ring-rebalance-nvcsr\" (UID: \"3aa21f38-ab8c-47b9-9ef0-f879e28eb01f\") " pod="openstack/swift-ring-rebalance-nvcsr" Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.400255 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3aa21f38-ab8c-47b9-9ef0-f879e28eb01f-scripts\") pod \"swift-ring-rebalance-nvcsr\" (UID: \"3aa21f38-ab8c-47b9-9ef0-f879e28eb01f\") " pod="openstack/swift-ring-rebalance-nvcsr" Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.400282 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/3aa21f38-ab8c-47b9-9ef0-f879e28eb01f-etc-swift\") pod \"swift-ring-rebalance-nvcsr\" (UID: \"3aa21f38-ab8c-47b9-9ef0-f879e28eb01f\") " pod="openstack/swift-ring-rebalance-nvcsr" Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.400388 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/3aa21f38-ab8c-47b9-9ef0-f879e28eb01f-swiftconf\") pod \"swift-ring-rebalance-nvcsr\" (UID: \"3aa21f38-ab8c-47b9-9ef0-f879e28eb01f\") " pod="openstack/swift-ring-rebalance-nvcsr" Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.400492 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-59f89\" (UniqueName: \"kubernetes.io/projected/d4a2be70-f0e5-4f39-8b5e-744b868a47ad-kube-api-access-59f89\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.419783 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d4a2be70-f0e5-4f39-8b5e-744b868a47ad-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "d4a2be70-f0e5-4f39-8b5e-744b868a47ad" (UID: "d4a2be70-f0e5-4f39-8b5e-744b868a47ad"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.443934 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d4a2be70-f0e5-4f39-8b5e-744b868a47ad-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d4a2be70-f0e5-4f39-8b5e-744b868a47ad" (UID: "d4a2be70-f0e5-4f39-8b5e-744b868a47ad"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.480527 4949 scope.go:117] "RemoveContainer" containerID="911cded894ec173a8503d795051bfda7229347e75364329d47657b4ecf323c84" Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.502695 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-69hxz\" (UniqueName: \"kubernetes.io/projected/3aa21f38-ab8c-47b9-9ef0-f879e28eb01f-kube-api-access-69hxz\") pod \"swift-ring-rebalance-nvcsr\" (UID: \"3aa21f38-ab8c-47b9-9ef0-f879e28eb01f\") " pod="openstack/swift-ring-rebalance-nvcsr" Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.502789 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/3aa21f38-ab8c-47b9-9ef0-f879e28eb01f-ring-data-devices\") pod \"swift-ring-rebalance-nvcsr\" (UID: \"3aa21f38-ab8c-47b9-9ef0-f879e28eb01f\") " pod="openstack/swift-ring-rebalance-nvcsr" Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.502808 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3aa21f38-ab8c-47b9-9ef0-f879e28eb01f-combined-ca-bundle\") pod \"swift-ring-rebalance-nvcsr\" (UID: \"3aa21f38-ab8c-47b9-9ef0-f879e28eb01f\") " pod="openstack/swift-ring-rebalance-nvcsr" Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.502846 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/3aa21f38-ab8c-47b9-9ef0-f879e28eb01f-dispersionconf\") pod \"swift-ring-rebalance-nvcsr\" (UID: \"3aa21f38-ab8c-47b9-9ef0-f879e28eb01f\") " pod="openstack/swift-ring-rebalance-nvcsr" Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.502878 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3aa21f38-ab8c-47b9-9ef0-f879e28eb01f-scripts\") pod \"swift-ring-rebalance-nvcsr\" (UID: \"3aa21f38-ab8c-47b9-9ef0-f879e28eb01f\") " pod="openstack/swift-ring-rebalance-nvcsr" Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.502922 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/3aa21f38-ab8c-47b9-9ef0-f879e28eb01f-etc-swift\") pod \"swift-ring-rebalance-nvcsr\" (UID: \"3aa21f38-ab8c-47b9-9ef0-f879e28eb01f\") " pod="openstack/swift-ring-rebalance-nvcsr" Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.503048 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b8fafaac-cbaa-4726-91b8-b0739034455f-etc-swift\") pod \"swift-storage-0\" (UID: \"b8fafaac-cbaa-4726-91b8-b0739034455f\") " pod="openstack/swift-storage-0" Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.503101 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: 
\"kubernetes.io/secret/3aa21f38-ab8c-47b9-9ef0-f879e28eb01f-swiftconf\") pod \"swift-ring-rebalance-nvcsr\" (UID: \"3aa21f38-ab8c-47b9-9ef0-f879e28eb01f\") " pod="openstack/swift-ring-rebalance-nvcsr" Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.503233 4949 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d4a2be70-f0e5-4f39-8b5e-744b868a47ad-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.503248 4949 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d4a2be70-f0e5-4f39-8b5e-744b868a47ad-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.505287 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/3aa21f38-ab8c-47b9-9ef0-f879e28eb01f-ring-data-devices\") pod \"swift-ring-rebalance-nvcsr\" (UID: \"3aa21f38-ab8c-47b9-9ef0-f879e28eb01f\") " pod="openstack/swift-ring-rebalance-nvcsr" Feb 16 11:29:08 crc kubenswrapper[4949]: E0216 11:29:08.505588 4949 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Feb 16 11:29:08 crc kubenswrapper[4949]: E0216 11:29:08.505620 4949 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Feb 16 11:29:08 crc kubenswrapper[4949]: E0216 11:29:08.505680 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/b8fafaac-cbaa-4726-91b8-b0739034455f-etc-swift podName:b8fafaac-cbaa-4726-91b8-b0739034455f nodeName:}" failed. No retries permitted until 2026-02-16 11:29:09.505655395 +0000 UTC m=+1339.134989550 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/b8fafaac-cbaa-4726-91b8-b0739034455f-etc-swift") pod "swift-storage-0" (UID: "b8fafaac-cbaa-4726-91b8-b0739034455f") : configmap "swift-ring-files" not found Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.507564 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3aa21f38-ab8c-47b9-9ef0-f879e28eb01f-scripts\") pod \"swift-ring-rebalance-nvcsr\" (UID: \"3aa21f38-ab8c-47b9-9ef0-f879e28eb01f\") " pod="openstack/swift-ring-rebalance-nvcsr" Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.508431 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/3aa21f38-ab8c-47b9-9ef0-f879e28eb01f-swiftconf\") pod \"swift-ring-rebalance-nvcsr\" (UID: \"3aa21f38-ab8c-47b9-9ef0-f879e28eb01f\") " pod="openstack/swift-ring-rebalance-nvcsr" Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.508620 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/3aa21f38-ab8c-47b9-9ef0-f879e28eb01f-etc-swift\") pod \"swift-ring-rebalance-nvcsr\" (UID: \"3aa21f38-ab8c-47b9-9ef0-f879e28eb01f\") " pod="openstack/swift-ring-rebalance-nvcsr" Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.508824 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3aa21f38-ab8c-47b9-9ef0-f879e28eb01f-combined-ca-bundle\") pod \"swift-ring-rebalance-nvcsr\" (UID: \"3aa21f38-ab8c-47b9-9ef0-f879e28eb01f\") " pod="openstack/swift-ring-rebalance-nvcsr" Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.512467 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/3aa21f38-ab8c-47b9-9ef0-f879e28eb01f-dispersionconf\") pod \"swift-ring-rebalance-nvcsr\" (UID: \"3aa21f38-ab8c-47b9-9ef0-f879e28eb01f\") " pod="openstack/swift-ring-rebalance-nvcsr" Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.525428 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-69hxz\" (UniqueName: \"kubernetes.io/projected/3aa21f38-ab8c-47b9-9ef0-f879e28eb01f-kube-api-access-69hxz\") pod \"swift-ring-rebalance-nvcsr\" (UID: \"3aa21f38-ab8c-47b9-9ef0-f879e28eb01f\") " pod="openstack/swift-ring-rebalance-nvcsr" Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.541018 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d4a2be70-f0e5-4f39-8b5e-744b868a47ad-config" (OuterVolumeSpecName: "config") pod "d4a2be70-f0e5-4f39-8b5e-744b868a47ad" (UID: "d4a2be70-f0e5-4f39-8b5e-744b868a47ad"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.548748 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-eec050c7-286a-4bac-8354-3c08ccb33204\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-eec050c7-286a-4bac-8354-3c08ccb33204\") pod \"swift-storage-0\" (UID: \"b8fafaac-cbaa-4726-91b8-b0739034455f\") " pod="openstack/swift-storage-0" Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.605645 4949 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4a2be70-f0e5-4f39-8b5e-744b868a47ad-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.696827 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-w9phd"] Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.708592 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-w9phd"] Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.719071 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.770746 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-nvcsr" Feb 16 11:29:08 crc kubenswrapper[4949]: I0216 11:29:08.826466 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Feb 16 11:29:09 crc kubenswrapper[4949]: I0216 11:29:09.256015 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d4a2be70-f0e5-4f39-8b5e-744b868a47ad" path="/var/lib/kubelet/pods/d4a2be70-f0e5-4f39-8b5e-744b868a47ad/volumes" Feb 16 11:29:09 crc kubenswrapper[4949]: W0216 11:29:09.319515 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3aa21f38_ab8c_47b9_9ef0_f879e28eb01f.slice/crio-605bc19337b4b1148af57750e8b5a2734734e38e9fcee4b136af32fa40ce3cc7 WatchSource:0}: Error finding container 605bc19337b4b1148af57750e8b5a2734734e38e9fcee4b136af32fa40ce3cc7: Status 404 returned error can't find the container with id 605bc19337b4b1148af57750e8b5a2734734e38e9fcee4b136af32fa40ce3cc7 Feb 16 11:29:09 crc kubenswrapper[4949]: I0216 11:29:09.320364 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-nvcsr"] Feb 16 11:29:09 crc kubenswrapper[4949]: I0216 11:29:09.322899 4949 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 16 11:29:09 crc kubenswrapper[4949]: I0216 11:29:09.360776 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-2q49p" event={"ID":"e03032a6-a757-4270-bd4f-b84532ffcb4b","Type":"ContainerStarted","Data":"25db854f1d2f4f82868e63729e2a554a66b6f0c26bf15931e5b85827ce96be97"} Feb 16 11:29:09 crc kubenswrapper[4949]: I0216 11:29:09.361807 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-698758b865-2q49p" Feb 16 11:29:09 crc kubenswrapper[4949]: I0216 11:29:09.363657 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-nvcsr" event={"ID":"3aa21f38-ab8c-47b9-9ef0-f879e28eb01f","Type":"ContainerStarted","Data":"605bc19337b4b1148af57750e8b5a2734734e38e9fcee4b136af32fa40ce3cc7"} Feb 16 11:29:09 crc kubenswrapper[4949]: I0216 11:29:09.391433 4949 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-698758b865-2q49p" podStartSLOduration=3.391409918 podStartE2EDuration="3.391409918s" podCreationTimestamp="2026-02-16 11:29:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:29:09.38303692 +0000 UTC m=+1339.012371095" watchObservedRunningTime="2026-02-16 11:29:09.391409918 +0000 UTC m=+1339.020744083" Feb 16 11:29:09 crc kubenswrapper[4949]: I0216 11:29:09.527643 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b8fafaac-cbaa-4726-91b8-b0739034455f-etc-swift\") pod \"swift-storage-0\" (UID: \"b8fafaac-cbaa-4726-91b8-b0739034455f\") " pod="openstack/swift-storage-0" Feb 16 11:29:09 crc kubenswrapper[4949]: E0216 11:29:09.528038 4949 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Feb 16 11:29:09 crc kubenswrapper[4949]: E0216 11:29:09.528083 4949 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Feb 16 11:29:09 crc kubenswrapper[4949]: E0216 11:29:09.528140 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/b8fafaac-cbaa-4726-91b8-b0739034455f-etc-swift podName:b8fafaac-cbaa-4726-91b8-b0739034455f nodeName:}" failed. No retries permitted until 2026-02-16 11:29:11.528118977 +0000 UTC m=+1341.157453292 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/b8fafaac-cbaa-4726-91b8-b0739034455f-etc-swift") pod "swift-storage-0" (UID: "b8fafaac-cbaa-4726-91b8-b0739034455f") : configmap "swift-ring-files" not found Feb 16 11:29:10 crc kubenswrapper[4949]: I0216 11:29:10.696554 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-ft6d5"] Feb 16 11:29:10 crc kubenswrapper[4949]: I0216 11:29:10.699927 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-ft6d5" Feb 16 11:29:10 crc kubenswrapper[4949]: I0216 11:29:10.708765 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-mariadb-root-db-secret" Feb 16 11:29:10 crc kubenswrapper[4949]: I0216 11:29:10.711339 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-ft6d5"] Feb 16 11:29:10 crc kubenswrapper[4949]: I0216 11:29:10.756111 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hwff8\" (UniqueName: \"kubernetes.io/projected/bac7946d-0613-402a-911d-a3a7f2ca6d5e-kube-api-access-hwff8\") pod \"root-account-create-update-ft6d5\" (UID: \"bac7946d-0613-402a-911d-a3a7f2ca6d5e\") " pod="openstack/root-account-create-update-ft6d5" Feb 16 11:29:10 crc kubenswrapper[4949]: I0216 11:29:10.756319 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bac7946d-0613-402a-911d-a3a7f2ca6d5e-operator-scripts\") pod \"root-account-create-update-ft6d5\" (UID: \"bac7946d-0613-402a-911d-a3a7f2ca6d5e\") " pod="openstack/root-account-create-update-ft6d5" Feb 16 11:29:10 crc kubenswrapper[4949]: I0216 11:29:10.858853 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bac7946d-0613-402a-911d-a3a7f2ca6d5e-operator-scripts\") pod \"root-account-create-update-ft6d5\" (UID: \"bac7946d-0613-402a-911d-a3a7f2ca6d5e\") " pod="openstack/root-account-create-update-ft6d5" Feb 16 11:29:10 crc kubenswrapper[4949]: I0216 11:29:10.859132 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hwff8\" (UniqueName: \"kubernetes.io/projected/bac7946d-0613-402a-911d-a3a7f2ca6d5e-kube-api-access-hwff8\") pod \"root-account-create-update-ft6d5\" (UID: \"bac7946d-0613-402a-911d-a3a7f2ca6d5e\") " pod="openstack/root-account-create-update-ft6d5" Feb 16 11:29:10 crc kubenswrapper[4949]: I0216 11:29:10.860496 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bac7946d-0613-402a-911d-a3a7f2ca6d5e-operator-scripts\") pod \"root-account-create-update-ft6d5\" (UID: \"bac7946d-0613-402a-911d-a3a7f2ca6d5e\") " pod="openstack/root-account-create-update-ft6d5" Feb 16 11:29:10 crc kubenswrapper[4949]: I0216 11:29:10.887156 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hwff8\" (UniqueName: \"kubernetes.io/projected/bac7946d-0613-402a-911d-a3a7f2ca6d5e-kube-api-access-hwff8\") pod \"root-account-create-update-ft6d5\" (UID: \"bac7946d-0613-402a-911d-a3a7f2ca6d5e\") " pod="openstack/root-account-create-update-ft6d5" Feb 16 11:29:11 crc kubenswrapper[4949]: I0216 11:29:11.028823 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-ft6d5" Feb 16 11:29:11 crc kubenswrapper[4949]: I0216 11:29:11.579378 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b8fafaac-cbaa-4726-91b8-b0739034455f-etc-swift\") pod \"swift-storage-0\" (UID: \"b8fafaac-cbaa-4726-91b8-b0739034455f\") " pod="openstack/swift-storage-0" Feb 16 11:29:11 crc kubenswrapper[4949]: E0216 11:29:11.579577 4949 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Feb 16 11:29:11 crc kubenswrapper[4949]: E0216 11:29:11.579596 4949 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Feb 16 11:29:11 crc kubenswrapper[4949]: E0216 11:29:11.579655 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/b8fafaac-cbaa-4726-91b8-b0739034455f-etc-swift podName:b8fafaac-cbaa-4726-91b8-b0739034455f nodeName:}" failed. No retries permitted until 2026-02-16 11:29:15.579636759 +0000 UTC m=+1345.208970924 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/b8fafaac-cbaa-4726-91b8-b0739034455f-etc-swift") pod "swift-storage-0" (UID: "b8fafaac-cbaa-4726-91b8-b0739034455f") : configmap "swift-ring-files" not found Feb 16 11:29:11 crc kubenswrapper[4949]: I0216 11:29:11.766283 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Feb 16 11:29:11 crc kubenswrapper[4949]: I0216 11:29:11.860055 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Feb 16 11:29:13 crc kubenswrapper[4949]: I0216 11:29:13.411397 4949 generic.go:334] "Generic (PLEG): container finished" podID="9dd62db2-4af9-482c-b9ad-34021e59dae8" containerID="8a744026cd1e3a9a8a75a94fced472e587930e54ef454c1938f4129de226cd1c" exitCode=0 Feb 16 11:29:13 crc kubenswrapper[4949]: I0216 11:29:13.411741 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"9dd62db2-4af9-482c-b9ad-34021e59dae8","Type":"ContainerDied","Data":"8a744026cd1e3a9a8a75a94fced472e587930e54ef454c1938f4129de226cd1c"} Feb 16 11:29:14 crc kubenswrapper[4949]: W0216 11:29:13.999631 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbac7946d_0613_402a_911d_a3a7f2ca6d5e.slice/crio-a61d0423d91bc9fb4f0c01ad370e49ca9246cc103ee0d4ab33cbf2ee620d6678 WatchSource:0}: Error finding container a61d0423d91bc9fb4f0c01ad370e49ca9246cc103ee0d4ab33cbf2ee620d6678: Status 404 returned error can't find the container with id a61d0423d91bc9fb4f0c01ad370e49ca9246cc103ee0d4ab33cbf2ee620d6678 Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.001058 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-ft6d5"] Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.346958 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-p2zsr"] Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.348864 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-p2zsr" Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.360950 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-p2zsr"] Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.369417 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0fbeb10d-7447-462b-8a2b-b68d715a7d9e-operator-scripts\") pod \"glance-db-create-p2zsr\" (UID: \"0fbeb10d-7447-462b-8a2b-b68d715a7d9e\") " pod="openstack/glance-db-create-p2zsr" Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.369497 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tlqlr\" (UniqueName: \"kubernetes.io/projected/0fbeb10d-7447-462b-8a2b-b68d715a7d9e-kube-api-access-tlqlr\") pod \"glance-db-create-p2zsr\" (UID: \"0fbeb10d-7447-462b-8a2b-b68d715a7d9e\") " pod="openstack/glance-db-create-p2zsr" Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.437775 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-nvcsr" event={"ID":"3aa21f38-ab8c-47b9-9ef0-f879e28eb01f","Type":"ContainerStarted","Data":"e3f23244327b9e5c8410f1c2b2ec950a72580cedcec271f4603fc1adcb2aaeaf"} Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.449089 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-ft6d5" event={"ID":"bac7946d-0613-402a-911d-a3a7f2ca6d5e","Type":"ContainerStarted","Data":"ddb331e8524da19fc18eb470ff5dd75b4ca4898845ffdaef8d31c8d24ec537a8"} Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.449354 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-ft6d5" event={"ID":"bac7946d-0613-402a-911d-a3a7f2ca6d5e","Type":"ContainerStarted","Data":"a61d0423d91bc9fb4f0c01ad370e49ca9246cc103ee0d4ab33cbf2ee620d6678"} Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.473480 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0fbeb10d-7447-462b-8a2b-b68d715a7d9e-operator-scripts\") pod \"glance-db-create-p2zsr\" (UID: \"0fbeb10d-7447-462b-8a2b-b68d715a7d9e\") " pod="openstack/glance-db-create-p2zsr" Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.475095 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tlqlr\" (UniqueName: \"kubernetes.io/projected/0fbeb10d-7447-462b-8a2b-b68d715a7d9e-kube-api-access-tlqlr\") pod \"glance-db-create-p2zsr\" (UID: \"0fbeb10d-7447-462b-8a2b-b68d715a7d9e\") " pod="openstack/glance-db-create-p2zsr" Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.474975 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0fbeb10d-7447-462b-8a2b-b68d715a7d9e-operator-scripts\") pod \"glance-db-create-p2zsr\" (UID: \"0fbeb10d-7447-462b-8a2b-b68d715a7d9e\") " pod="openstack/glance-db-create-p2zsr" Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.483800 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-1a86-account-create-update-2sw8c"] Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.486628 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-1a86-account-create-update-2sw8c" Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.489002 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.495229 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-1a86-account-create-update-2sw8c"] Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.498687 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tlqlr\" (UniqueName: \"kubernetes.io/projected/0fbeb10d-7447-462b-8a2b-b68d715a7d9e-kube-api-access-tlqlr\") pod \"glance-db-create-p2zsr\" (UID: \"0fbeb10d-7447-462b-8a2b-b68d715a7d9e\") " pod="openstack/glance-db-create-p2zsr" Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.512897 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-nvcsr" podStartSLOduration=2.229977205 podStartE2EDuration="6.512864837s" podCreationTimestamp="2026-02-16 11:29:08 +0000 UTC" firstStartedPulling="2026-02-16 11:29:09.322630007 +0000 UTC m=+1338.951964172" lastFinishedPulling="2026-02-16 11:29:13.605517639 +0000 UTC m=+1343.234851804" observedRunningTime="2026-02-16 11:29:14.467505974 +0000 UTC m=+1344.096840129" watchObservedRunningTime="2026-02-16 11:29:14.512864837 +0000 UTC m=+1344.142198992" Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.528716 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/root-account-create-update-ft6d5" podStartSLOduration=4.528688879 podStartE2EDuration="4.528688879s" podCreationTimestamp="2026-02-16 11:29:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:29:14.484938061 +0000 UTC m=+1344.114272226" watchObservedRunningTime="2026-02-16 11:29:14.528688879 +0000 UTC m=+1344.158023044" Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.577877 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lr6lx\" (UniqueName: \"kubernetes.io/projected/8068ac9c-ae28-47f4-af20-1565bb98c28f-kube-api-access-lr6lx\") pod \"glance-1a86-account-create-update-2sw8c\" (UID: \"8068ac9c-ae28-47f4-af20-1565bb98c28f\") " pod="openstack/glance-1a86-account-create-update-2sw8c" Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.578573 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8068ac9c-ae28-47f4-af20-1565bb98c28f-operator-scripts\") pod \"glance-1a86-account-create-update-2sw8c\" (UID: \"8068ac9c-ae28-47f4-af20-1565bb98c28f\") " pod="openstack/glance-1a86-account-create-update-2sw8c" Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.652383 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-w6p2b"] Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.654467 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-w6p2b" Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.662442 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-w6p2b"] Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.681606 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p79t6\" (UniqueName: \"kubernetes.io/projected/0686a1d4-34d8-48ed-8e05-14f6e2b65462-kube-api-access-p79t6\") pod \"keystone-db-create-w6p2b\" (UID: \"0686a1d4-34d8-48ed-8e05-14f6e2b65462\") " pod="openstack/keystone-db-create-w6p2b" Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.681675 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0686a1d4-34d8-48ed-8e05-14f6e2b65462-operator-scripts\") pod \"keystone-db-create-w6p2b\" (UID: \"0686a1d4-34d8-48ed-8e05-14f6e2b65462\") " pod="openstack/keystone-db-create-w6p2b" Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.681739 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lr6lx\" (UniqueName: \"kubernetes.io/projected/8068ac9c-ae28-47f4-af20-1565bb98c28f-kube-api-access-lr6lx\") pod \"glance-1a86-account-create-update-2sw8c\" (UID: \"8068ac9c-ae28-47f4-af20-1565bb98c28f\") " pod="openstack/glance-1a86-account-create-update-2sw8c" Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.681868 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8068ac9c-ae28-47f4-af20-1565bb98c28f-operator-scripts\") pod \"glance-1a86-account-create-update-2sw8c\" (UID: \"8068ac9c-ae28-47f4-af20-1565bb98c28f\") " pod="openstack/glance-1a86-account-create-update-2sw8c" Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.682936 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8068ac9c-ae28-47f4-af20-1565bb98c28f-operator-scripts\") pod \"glance-1a86-account-create-update-2sw8c\" (UID: \"8068ac9c-ae28-47f4-af20-1565bb98c28f\") " pod="openstack/glance-1a86-account-create-update-2sw8c" Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.683181 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-p2zsr" Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.701983 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lr6lx\" (UniqueName: \"kubernetes.io/projected/8068ac9c-ae28-47f4-af20-1565bb98c28f-kube-api-access-lr6lx\") pod \"glance-1a86-account-create-update-2sw8c\" (UID: \"8068ac9c-ae28-47f4-af20-1565bb98c28f\") " pod="openstack/glance-1a86-account-create-update-2sw8c" Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.751123 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-43c4-account-create-update-jnnqw"] Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.755669 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-43c4-account-create-update-jnnqw" Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.759343 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.770843 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-43c4-account-create-update-jnnqw"] Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.794041 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zjpbh\" (UniqueName: \"kubernetes.io/projected/d81d6ce5-367a-40d2-af0c-c2e88c48bc4e-kube-api-access-zjpbh\") pod \"keystone-43c4-account-create-update-jnnqw\" (UID: \"d81d6ce5-367a-40d2-af0c-c2e88c48bc4e\") " pod="openstack/keystone-43c4-account-create-update-jnnqw" Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.794237 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p79t6\" (UniqueName: \"kubernetes.io/projected/0686a1d4-34d8-48ed-8e05-14f6e2b65462-kube-api-access-p79t6\") pod \"keystone-db-create-w6p2b\" (UID: \"0686a1d4-34d8-48ed-8e05-14f6e2b65462\") " pod="openstack/keystone-db-create-w6p2b" Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.794288 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0686a1d4-34d8-48ed-8e05-14f6e2b65462-operator-scripts\") pod \"keystone-db-create-w6p2b\" (UID: \"0686a1d4-34d8-48ed-8e05-14f6e2b65462\") " pod="openstack/keystone-db-create-w6p2b" Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.794342 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d81d6ce5-367a-40d2-af0c-c2e88c48bc4e-operator-scripts\") pod \"keystone-43c4-account-create-update-jnnqw\" (UID: \"d81d6ce5-367a-40d2-af0c-c2e88c48bc4e\") " pod="openstack/keystone-43c4-account-create-update-jnnqw" Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.795806 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0686a1d4-34d8-48ed-8e05-14f6e2b65462-operator-scripts\") pod \"keystone-db-create-w6p2b\" (UID: \"0686a1d4-34d8-48ed-8e05-14f6e2b65462\") " pod="openstack/keystone-db-create-w6p2b" Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.827725 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p79t6\" (UniqueName: \"kubernetes.io/projected/0686a1d4-34d8-48ed-8e05-14f6e2b65462-kube-api-access-p79t6\") pod \"keystone-db-create-w6p2b\" (UID: \"0686a1d4-34d8-48ed-8e05-14f6e2b65462\") " pod="openstack/keystone-db-create-w6p2b" Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.865046 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-1a86-account-create-update-2sw8c" Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.866381 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-vmnzk"] Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.868782 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-vmnzk" Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.876721 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-vmnzk"] Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.896919 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zjpbh\" (UniqueName: \"kubernetes.io/projected/d81d6ce5-367a-40d2-af0c-c2e88c48bc4e-kube-api-access-zjpbh\") pod \"keystone-43c4-account-create-update-jnnqw\" (UID: \"d81d6ce5-367a-40d2-af0c-c2e88c48bc4e\") " pod="openstack/keystone-43c4-account-create-update-jnnqw" Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.897129 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-94qdb\" (UniqueName: \"kubernetes.io/projected/7118450d-767c-4539-801a-2ab0b5715487-kube-api-access-94qdb\") pod \"placement-db-create-vmnzk\" (UID: \"7118450d-767c-4539-801a-2ab0b5715487\") " pod="openstack/placement-db-create-vmnzk" Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.897285 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7118450d-767c-4539-801a-2ab0b5715487-operator-scripts\") pod \"placement-db-create-vmnzk\" (UID: \"7118450d-767c-4539-801a-2ab0b5715487\") " pod="openstack/placement-db-create-vmnzk" Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.897498 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d81d6ce5-367a-40d2-af0c-c2e88c48bc4e-operator-scripts\") pod \"keystone-43c4-account-create-update-jnnqw\" (UID: \"d81d6ce5-367a-40d2-af0c-c2e88c48bc4e\") " pod="openstack/keystone-43c4-account-create-update-jnnqw" Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.902546 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d81d6ce5-367a-40d2-af0c-c2e88c48bc4e-operator-scripts\") pod \"keystone-43c4-account-create-update-jnnqw\" (UID: \"d81d6ce5-367a-40d2-af0c-c2e88c48bc4e\") " pod="openstack/keystone-43c4-account-create-update-jnnqw" Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.950022 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zjpbh\" (UniqueName: \"kubernetes.io/projected/d81d6ce5-367a-40d2-af0c-c2e88c48bc4e-kube-api-access-zjpbh\") pod \"keystone-43c4-account-create-update-jnnqw\" (UID: \"d81d6ce5-367a-40d2-af0c-c2e88c48bc4e\") " pod="openstack/keystone-43c4-account-create-update-jnnqw" Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.961301 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-29db-account-create-update-sk7mb"] Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.966366 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-29db-account-create-update-sk7mb" Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.968764 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.973351 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-43c4-account-create-update-jnnqw" Feb 16 11:29:14 crc kubenswrapper[4949]: I0216 11:29:14.987507 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-29db-account-create-update-sk7mb"] Feb 16 11:29:15 crc kubenswrapper[4949]: I0216 11:29:15.007106 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fqc6r\" (UniqueName: \"kubernetes.io/projected/25f93b07-c059-4191-9ad3-7cb42412b2fb-kube-api-access-fqc6r\") pod \"placement-29db-account-create-update-sk7mb\" (UID: \"25f93b07-c059-4191-9ad3-7cb42412b2fb\") " pod="openstack/placement-29db-account-create-update-sk7mb" Feb 16 11:29:15 crc kubenswrapper[4949]: I0216 11:29:15.007198 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-94qdb\" (UniqueName: \"kubernetes.io/projected/7118450d-767c-4539-801a-2ab0b5715487-kube-api-access-94qdb\") pod \"placement-db-create-vmnzk\" (UID: \"7118450d-767c-4539-801a-2ab0b5715487\") " pod="openstack/placement-db-create-vmnzk" Feb 16 11:29:15 crc kubenswrapper[4949]: I0216 11:29:15.007224 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/25f93b07-c059-4191-9ad3-7cb42412b2fb-operator-scripts\") pod \"placement-29db-account-create-update-sk7mb\" (UID: \"25f93b07-c059-4191-9ad3-7cb42412b2fb\") " pod="openstack/placement-29db-account-create-update-sk7mb" Feb 16 11:29:15 crc kubenswrapper[4949]: I0216 11:29:15.007251 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7118450d-767c-4539-801a-2ab0b5715487-operator-scripts\") pod \"placement-db-create-vmnzk\" (UID: \"7118450d-767c-4539-801a-2ab0b5715487\") " pod="openstack/placement-db-create-vmnzk" Feb 16 11:29:15 crc kubenswrapper[4949]: I0216 11:29:15.008281 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7118450d-767c-4539-801a-2ab0b5715487-operator-scripts\") pod \"placement-db-create-vmnzk\" (UID: \"7118450d-767c-4539-801a-2ab0b5715487\") " pod="openstack/placement-db-create-vmnzk" Feb 16 11:29:15 crc kubenswrapper[4949]: I0216 11:29:15.023909 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-w6p2b" Feb 16 11:29:15 crc kubenswrapper[4949]: I0216 11:29:15.040953 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-94qdb\" (UniqueName: \"kubernetes.io/projected/7118450d-767c-4539-801a-2ab0b5715487-kube-api-access-94qdb\") pod \"placement-db-create-vmnzk\" (UID: \"7118450d-767c-4539-801a-2ab0b5715487\") " pod="openstack/placement-db-create-vmnzk" Feb 16 11:29:15 crc kubenswrapper[4949]: I0216 11:29:15.110305 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fqc6r\" (UniqueName: \"kubernetes.io/projected/25f93b07-c059-4191-9ad3-7cb42412b2fb-kube-api-access-fqc6r\") pod \"placement-29db-account-create-update-sk7mb\" (UID: \"25f93b07-c059-4191-9ad3-7cb42412b2fb\") " pod="openstack/placement-29db-account-create-update-sk7mb" Feb 16 11:29:15 crc kubenswrapper[4949]: I0216 11:29:15.110402 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/25f93b07-c059-4191-9ad3-7cb42412b2fb-operator-scripts\") pod \"placement-29db-account-create-update-sk7mb\" (UID: \"25f93b07-c059-4191-9ad3-7cb42412b2fb\") " pod="openstack/placement-29db-account-create-update-sk7mb" Feb 16 11:29:15 crc kubenswrapper[4949]: I0216 11:29:15.112045 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/25f93b07-c059-4191-9ad3-7cb42412b2fb-operator-scripts\") pod \"placement-29db-account-create-update-sk7mb\" (UID: \"25f93b07-c059-4191-9ad3-7cb42412b2fb\") " pod="openstack/placement-29db-account-create-update-sk7mb" Feb 16 11:29:15 crc kubenswrapper[4949]: I0216 11:29:15.134079 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fqc6r\" (UniqueName: \"kubernetes.io/projected/25f93b07-c059-4191-9ad3-7cb42412b2fb-kube-api-access-fqc6r\") pod \"placement-29db-account-create-update-sk7mb\" (UID: \"25f93b07-c059-4191-9ad3-7cb42412b2fb\") " pod="openstack/placement-29db-account-create-update-sk7mb" Feb 16 11:29:15 crc kubenswrapper[4949]: I0216 11:29:15.295772 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-vmnzk" Feb 16 11:29:15 crc kubenswrapper[4949]: I0216 11:29:15.308533 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-29db-account-create-update-sk7mb" Feb 16 11:29:15 crc kubenswrapper[4949]: I0216 11:29:15.509950 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-p2zsr"] Feb 16 11:29:15 crc kubenswrapper[4949]: I0216 11:29:15.517506 4949 generic.go:334] "Generic (PLEG): container finished" podID="bac7946d-0613-402a-911d-a3a7f2ca6d5e" containerID="ddb331e8524da19fc18eb470ff5dd75b4ca4898845ffdaef8d31c8d24ec537a8" exitCode=0 Feb 16 11:29:15 crc kubenswrapper[4949]: I0216 11:29:15.519575 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-ft6d5" event={"ID":"bac7946d-0613-402a-911d-a3a7f2ca6d5e","Type":"ContainerDied","Data":"ddb331e8524da19fc18eb470ff5dd75b4ca4898845ffdaef8d31c8d24ec537a8"} Feb 16 11:29:15 crc kubenswrapper[4949]: W0216 11:29:15.548770 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0fbeb10d_7447_462b_8a2b_b68d715a7d9e.slice/crio-562124f518d83138a18aed17c259321105d7ea8b49d43a5a8e4813d5f97d908d WatchSource:0}: Error finding container 562124f518d83138a18aed17c259321105d7ea8b49d43a5a8e4813d5f97d908d: Status 404 returned error can't find the container with id 562124f518d83138a18aed17c259321105d7ea8b49d43a5a8e4813d5f97d908d Feb 16 11:29:15 crc kubenswrapper[4949]: I0216 11:29:15.626615 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b8fafaac-cbaa-4726-91b8-b0739034455f-etc-swift\") pod \"swift-storage-0\" (UID: \"b8fafaac-cbaa-4726-91b8-b0739034455f\") " pod="openstack/swift-storage-0" Feb 16 11:29:15 crc kubenswrapper[4949]: E0216 11:29:15.627658 4949 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Feb 16 11:29:15 crc kubenswrapper[4949]: E0216 11:29:15.627685 4949 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Feb 16 11:29:15 crc kubenswrapper[4949]: E0216 11:29:15.627737 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/b8fafaac-cbaa-4726-91b8-b0739034455f-etc-swift podName:b8fafaac-cbaa-4726-91b8-b0739034455f nodeName:}" failed. No retries permitted until 2026-02-16 11:29:23.627718173 +0000 UTC m=+1353.257052338 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/b8fafaac-cbaa-4726-91b8-b0739034455f-etc-swift") pod "swift-storage-0" (UID: "b8fafaac-cbaa-4726-91b8-b0739034455f") : configmap "swift-ring-files" not found Feb 16 11:29:15 crc kubenswrapper[4949]: I0216 11:29:15.634273 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-43c4-account-create-update-jnnqw"] Feb 16 11:29:15 crc kubenswrapper[4949]: I0216 11:29:15.660549 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-1a86-account-create-update-2sw8c"] Feb 16 11:29:15 crc kubenswrapper[4949]: W0216 11:29:15.662553 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd81d6ce5_367a_40d2_af0c_c2e88c48bc4e.slice/crio-44e0739020ece18240822411c594e7b8d4196a70a383f4e414628cceebd75180 WatchSource:0}: Error finding container 44e0739020ece18240822411c594e7b8d4196a70a383f4e414628cceebd75180: Status 404 returned error can't find the container with id 44e0739020ece18240822411c594e7b8d4196a70a383f4e414628cceebd75180 Feb 16 11:29:15 crc kubenswrapper[4949]: I0216 11:29:15.812853 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-w6p2b"] Feb 16 11:29:15 crc kubenswrapper[4949]: W0216 11:29:15.819099 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0686a1d4_34d8_48ed_8e05_14f6e2b65462.slice/crio-1ada10bed2f39693c40f6cc764235dda6e91c4b41b6066977be8d14b20215646 WatchSource:0}: Error finding container 1ada10bed2f39693c40f6cc764235dda6e91c4b41b6066977be8d14b20215646: Status 404 returned error can't find the container with id 1ada10bed2f39693c40f6cc764235dda6e91c4b41b6066977be8d14b20215646 Feb 16 11:29:16 crc kubenswrapper[4949]: I0216 11:29:16.046040 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-vmnzk"] Feb 16 11:29:16 crc kubenswrapper[4949]: I0216 11:29:16.064308 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-29db-account-create-update-sk7mb"] Feb 16 11:29:16 crc kubenswrapper[4949]: I0216 11:29:16.088893 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Feb 16 11:29:16 crc kubenswrapper[4949]: I0216 11:29:16.343861 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-mfr7w"] Feb 16 11:29:16 crc kubenswrapper[4949]: I0216 11:29:16.345879 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-mfr7w"] Feb 16 11:29:16 crc kubenswrapper[4949]: I0216 11:29:16.345978 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-mfr7w" Feb 16 11:29:16 crc kubenswrapper[4949]: I0216 11:29:16.471567 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qcfkg\" (UniqueName: \"kubernetes.io/projected/9e3f4b81-c950-440a-a85a-6769c6affd08-kube-api-access-qcfkg\") pod \"mysqld-exporter-openstack-db-create-mfr7w\" (UID: \"9e3f4b81-c950-440a-a85a-6769c6affd08\") " pod="openstack/mysqld-exporter-openstack-db-create-mfr7w" Feb 16 11:29:16 crc kubenswrapper[4949]: I0216 11:29:16.472021 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9e3f4b81-c950-440a-a85a-6769c6affd08-operator-scripts\") pod \"mysqld-exporter-openstack-db-create-mfr7w\" (UID: \"9e3f4b81-c950-440a-a85a-6769c6affd08\") " pod="openstack/mysqld-exporter-openstack-db-create-mfr7w" Feb 16 11:29:16 crc kubenswrapper[4949]: I0216 11:29:16.544521 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-29db-account-create-update-sk7mb" event={"ID":"25f93b07-c059-4191-9ad3-7cb42412b2fb","Type":"ContainerStarted","Data":"211324f1a27603e7c9df9b512272f9fd3a3fd9f4fbea9ae533f065a06813c2b8"} Feb 16 11:29:16 crc kubenswrapper[4949]: I0216 11:29:16.544599 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-29db-account-create-update-sk7mb" event={"ID":"25f93b07-c059-4191-9ad3-7cb42412b2fb","Type":"ContainerStarted","Data":"b3466e6dabb67eff8ed90844756b5c4ac5f2b25e160e135f9f5dae7d2e262b5d"} Feb 16 11:29:16 crc kubenswrapper[4949]: I0216 11:29:16.549500 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-p2zsr" event={"ID":"0fbeb10d-7447-462b-8a2b-b68d715a7d9e","Type":"ContainerStarted","Data":"361c9940063923e9aed905955978713e646bee3372d7ee732af6a0fe7cffdf45"} Feb 16 11:29:16 crc kubenswrapper[4949]: I0216 11:29:16.549565 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-p2zsr" event={"ID":"0fbeb10d-7447-462b-8a2b-b68d715a7d9e","Type":"ContainerStarted","Data":"562124f518d83138a18aed17c259321105d7ea8b49d43a5a8e4813d5f97d908d"} Feb 16 11:29:16 crc kubenswrapper[4949]: I0216 11:29:16.551126 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-ecac-account-create-update-kqvjh"] Feb 16 11:29:16 crc kubenswrapper[4949]: I0216 11:29:16.552966 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-ecac-account-create-update-kqvjh" Feb 16 11:29:16 crc kubenswrapper[4949]: I0216 11:29:16.557550 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-openstack-db-secret" Feb 16 11:29:16 crc kubenswrapper[4949]: I0216 11:29:16.563651 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-ecac-account-create-update-kqvjh"] Feb 16 11:29:16 crc kubenswrapper[4949]: I0216 11:29:16.571825 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-1a86-account-create-update-2sw8c" event={"ID":"8068ac9c-ae28-47f4-af20-1565bb98c28f","Type":"ContainerStarted","Data":"6ce5db53543acedd8939b98845a0bd6fad4f7b3ec6a77cad1fd22ce42423f158"} Feb 16 11:29:16 crc kubenswrapper[4949]: I0216 11:29:16.571898 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-1a86-account-create-update-2sw8c" event={"ID":"8068ac9c-ae28-47f4-af20-1565bb98c28f","Type":"ContainerStarted","Data":"5242237625d42cbb05f78cf6fb2759bd386f641b187276bbebf8978fde5e9d7b"} Feb 16 11:29:16 crc kubenswrapper[4949]: I0216 11:29:16.576345 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qcfkg\" (UniqueName: \"kubernetes.io/projected/9e3f4b81-c950-440a-a85a-6769c6affd08-kube-api-access-qcfkg\") pod \"mysqld-exporter-openstack-db-create-mfr7w\" (UID: \"9e3f4b81-c950-440a-a85a-6769c6affd08\") " pod="openstack/mysqld-exporter-openstack-db-create-mfr7w" Feb 16 11:29:16 crc kubenswrapper[4949]: I0216 11:29:16.576488 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9e3f4b81-c950-440a-a85a-6769c6affd08-operator-scripts\") pod \"mysqld-exporter-openstack-db-create-mfr7w\" (UID: \"9e3f4b81-c950-440a-a85a-6769c6affd08\") " pod="openstack/mysqld-exporter-openstack-db-create-mfr7w" Feb 16 11:29:16 crc kubenswrapper[4949]: I0216 11:29:16.577335 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9e3f4b81-c950-440a-a85a-6769c6affd08-operator-scripts\") pod \"mysqld-exporter-openstack-db-create-mfr7w\" (UID: \"9e3f4b81-c950-440a-a85a-6769c6affd08\") " pod="openstack/mysqld-exporter-openstack-db-create-mfr7w" Feb 16 11:29:16 crc kubenswrapper[4949]: I0216 11:29:16.588881 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-29db-account-create-update-sk7mb" podStartSLOduration=2.588861986 podStartE2EDuration="2.588861986s" podCreationTimestamp="2026-02-16 11:29:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:29:16.574064604 +0000 UTC m=+1346.203398769" watchObservedRunningTime="2026-02-16 11:29:16.588861986 +0000 UTC m=+1346.218196151" Feb 16 11:29:16 crc kubenswrapper[4949]: I0216 11:29:16.622756 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-w6p2b" event={"ID":"0686a1d4-34d8-48ed-8e05-14f6e2b65462","Type":"ContainerStarted","Data":"46bef3d44c1e5dd8b60fc72b6cafe5174aac8067729859e639452edd51a24d2e"} Feb 16 11:29:16 crc kubenswrapper[4949]: I0216 11:29:16.623155 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-w6p2b" 
event={"ID":"0686a1d4-34d8-48ed-8e05-14f6e2b65462","Type":"ContainerStarted","Data":"1ada10bed2f39693c40f6cc764235dda6e91c4b41b6066977be8d14b20215646"} Feb 16 11:29:16 crc kubenswrapper[4949]: I0216 11:29:16.631214 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-43c4-account-create-update-jnnqw" event={"ID":"d81d6ce5-367a-40d2-af0c-c2e88c48bc4e","Type":"ContainerStarted","Data":"ef2dd86f5dc586e7f1d85040d39b311aa3092def01e38437226c686f54dbb323"} Feb 16 11:29:16 crc kubenswrapper[4949]: I0216 11:29:16.631265 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-43c4-account-create-update-jnnqw" event={"ID":"d81d6ce5-367a-40d2-af0c-c2e88c48bc4e","Type":"ContainerStarted","Data":"44e0739020ece18240822411c594e7b8d4196a70a383f4e414628cceebd75180"} Feb 16 11:29:16 crc kubenswrapper[4949]: I0216 11:29:16.640004 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qcfkg\" (UniqueName: \"kubernetes.io/projected/9e3f4b81-c950-440a-a85a-6769c6affd08-kube-api-access-qcfkg\") pod \"mysqld-exporter-openstack-db-create-mfr7w\" (UID: \"9e3f4b81-c950-440a-a85a-6769c6affd08\") " pod="openstack/mysqld-exporter-openstack-db-create-mfr7w" Feb 16 11:29:16 crc kubenswrapper[4949]: I0216 11:29:16.648141 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-vmnzk" event={"ID":"7118450d-767c-4539-801a-2ab0b5715487","Type":"ContainerStarted","Data":"d6fd0474a0143aa44ea872486520519acb6d1eda44e8edd66128de36702edd56"} Feb 16 11:29:16 crc kubenswrapper[4949]: I0216 11:29:16.648212 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-vmnzk" event={"ID":"7118450d-767c-4539-801a-2ab0b5715487","Type":"ContainerStarted","Data":"c90ee7a5a2eb07e5eacf4f566446de6cccbc607881c54b56f1b4eb549614d131"} Feb 16 11:29:16 crc kubenswrapper[4949]: I0216 11:29:16.671927 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-1a86-account-create-update-2sw8c" podStartSLOduration=2.671897945 podStartE2EDuration="2.671897945s" podCreationTimestamp="2026-02-16 11:29:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:29:16.612903912 +0000 UTC m=+1346.242238077" watchObservedRunningTime="2026-02-16 11:29:16.671897945 +0000 UTC m=+1346.301232110" Feb 16 11:29:16 crc kubenswrapper[4949]: I0216 11:29:16.679058 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/99deca80-3672-484c-9902-878b7a51ac6a-operator-scripts\") pod \"mysqld-exporter-ecac-account-create-update-kqvjh\" (UID: \"99deca80-3672-484c-9902-878b7a51ac6a\") " pod="openstack/mysqld-exporter-ecac-account-create-update-kqvjh" Feb 16 11:29:16 crc kubenswrapper[4949]: I0216 11:29:16.679155 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wmpz7\" (UniqueName: \"kubernetes.io/projected/99deca80-3672-484c-9902-878b7a51ac6a-kube-api-access-wmpz7\") pod \"mysqld-exporter-ecac-account-create-update-kqvjh\" (UID: \"99deca80-3672-484c-9902-878b7a51ac6a\") " pod="openstack/mysqld-exporter-ecac-account-create-update-kqvjh" Feb 16 11:29:16 crc kubenswrapper[4949]: I0216 11:29:16.704279 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-mfr7w" Feb 16 11:29:16 crc kubenswrapper[4949]: I0216 11:29:16.713985 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-create-p2zsr" podStartSLOduration=2.713950694 podStartE2EDuration="2.713950694s" podCreationTimestamp="2026-02-16 11:29:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:29:16.632909643 +0000 UTC m=+1346.262243808" watchObservedRunningTime="2026-02-16 11:29:16.713950694 +0000 UTC m=+1346.343284869" Feb 16 11:29:16 crc kubenswrapper[4949]: I0216 11:29:16.743011 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-create-vmnzk" podStartSLOduration=2.7429268 podStartE2EDuration="2.7429268s" podCreationTimestamp="2026-02-16 11:29:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:29:16.686728298 +0000 UTC m=+1346.316062483" watchObservedRunningTime="2026-02-16 11:29:16.7429268 +0000 UTC m=+1346.372260975" Feb 16 11:29:16 crc kubenswrapper[4949]: I0216 11:29:16.777656 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-create-w6p2b" podStartSLOduration=2.77762367 podStartE2EDuration="2.77762367s" podCreationTimestamp="2026-02-16 11:29:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:29:16.705437721 +0000 UTC m=+1346.334771886" watchObservedRunningTime="2026-02-16 11:29:16.77762367 +0000 UTC m=+1346.406957835" Feb 16 11:29:16 crc kubenswrapper[4949]: I0216 11:29:16.781794 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/99deca80-3672-484c-9902-878b7a51ac6a-operator-scripts\") pod \"mysqld-exporter-ecac-account-create-update-kqvjh\" (UID: \"99deca80-3672-484c-9902-878b7a51ac6a\") " pod="openstack/mysqld-exporter-ecac-account-create-update-kqvjh" Feb 16 11:29:16 crc kubenswrapper[4949]: I0216 11:29:16.781878 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wmpz7\" (UniqueName: \"kubernetes.io/projected/99deca80-3672-484c-9902-878b7a51ac6a-kube-api-access-wmpz7\") pod \"mysqld-exporter-ecac-account-create-update-kqvjh\" (UID: \"99deca80-3672-484c-9902-878b7a51ac6a\") " pod="openstack/mysqld-exporter-ecac-account-create-update-kqvjh" Feb 16 11:29:16 crc kubenswrapper[4949]: I0216 11:29:16.785090 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/99deca80-3672-484c-9902-878b7a51ac6a-operator-scripts\") pod \"mysqld-exporter-ecac-account-create-update-kqvjh\" (UID: \"99deca80-3672-484c-9902-878b7a51ac6a\") " pod="openstack/mysqld-exporter-ecac-account-create-update-kqvjh" Feb 16 11:29:16 crc kubenswrapper[4949]: I0216 11:29:16.795857 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-43c4-account-create-update-jnnqw" podStartSLOduration=2.7958294390000002 podStartE2EDuration="2.795829439s" podCreationTimestamp="2026-02-16 11:29:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:29:16.729486157 +0000 
UTC m=+1346.358820322" watchObservedRunningTime="2026-02-16 11:29:16.795829439 +0000 UTC m=+1346.425163604" Feb 16 11:29:16 crc kubenswrapper[4949]: I0216 11:29:16.806698 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wmpz7\" (UniqueName: \"kubernetes.io/projected/99deca80-3672-484c-9902-878b7a51ac6a-kube-api-access-wmpz7\") pod \"mysqld-exporter-ecac-account-create-update-kqvjh\" (UID: \"99deca80-3672-484c-9902-878b7a51ac6a\") " pod="openstack/mysqld-exporter-ecac-account-create-update-kqvjh" Feb 16 11:29:16 crc kubenswrapper[4949]: I0216 11:29:16.842483 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-698758b865-2q49p" Feb 16 11:29:16 crc kubenswrapper[4949]: I0216 11:29:16.912662 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-ecac-account-create-update-kqvjh" Feb 16 11:29:16 crc kubenswrapper[4949]: I0216 11:29:16.938099 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-kslw5"] Feb 16 11:29:16 crc kubenswrapper[4949]: I0216 11:29:16.938688 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-86db49b7ff-kslw5" podUID="9cba7e57-5348-4b72-bf94-43da9a6a504d" containerName="dnsmasq-dns" containerID="cri-o://cd2623268180de8b4383b823158db3c698c76e8cfb0120e6e2ea8cd841746ae4" gracePeriod=10 Feb 16 11:29:17 crc kubenswrapper[4949]: I0216 11:29:17.253014 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-ft6d5" Feb 16 11:29:17 crc kubenswrapper[4949]: I0216 11:29:17.302524 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hwff8\" (UniqueName: \"kubernetes.io/projected/bac7946d-0613-402a-911d-a3a7f2ca6d5e-kube-api-access-hwff8\") pod \"bac7946d-0613-402a-911d-a3a7f2ca6d5e\" (UID: \"bac7946d-0613-402a-911d-a3a7f2ca6d5e\") " Feb 16 11:29:17 crc kubenswrapper[4949]: I0216 11:29:17.302626 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bac7946d-0613-402a-911d-a3a7f2ca6d5e-operator-scripts\") pod \"bac7946d-0613-402a-911d-a3a7f2ca6d5e\" (UID: \"bac7946d-0613-402a-911d-a3a7f2ca6d5e\") " Feb 16 11:29:17 crc kubenswrapper[4949]: I0216 11:29:17.304126 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bac7946d-0613-402a-911d-a3a7f2ca6d5e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "bac7946d-0613-402a-911d-a3a7f2ca6d5e" (UID: "bac7946d-0613-402a-911d-a3a7f2ca6d5e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:29:17 crc kubenswrapper[4949]: I0216 11:29:17.311024 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bac7946d-0613-402a-911d-a3a7f2ca6d5e-kube-api-access-hwff8" (OuterVolumeSpecName: "kube-api-access-hwff8") pod "bac7946d-0613-402a-911d-a3a7f2ca6d5e" (UID: "bac7946d-0613-402a-911d-a3a7f2ca6d5e"). InnerVolumeSpecName "kube-api-access-hwff8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:29:17 crc kubenswrapper[4949]: I0216 11:29:17.406374 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hwff8\" (UniqueName: \"kubernetes.io/projected/bac7946d-0613-402a-911d-a3a7f2ca6d5e-kube-api-access-hwff8\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:17 crc kubenswrapper[4949]: I0216 11:29:17.406443 4949 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bac7946d-0613-402a-911d-a3a7f2ca6d5e-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:17 crc kubenswrapper[4949]: I0216 11:29:17.575584 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-mfr7w"] Feb 16 11:29:17 crc kubenswrapper[4949]: I0216 11:29:17.690020 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-ecac-account-create-update-kqvjh"] Feb 16 11:29:17 crc kubenswrapper[4949]: I0216 11:29:17.692773 4949 generic.go:334] "Generic (PLEG): container finished" podID="7118450d-767c-4539-801a-2ab0b5715487" containerID="d6fd0474a0143aa44ea872486520519acb6d1eda44e8edd66128de36702edd56" exitCode=0 Feb 16 11:29:17 crc kubenswrapper[4949]: I0216 11:29:17.692864 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-vmnzk" event={"ID":"7118450d-767c-4539-801a-2ab0b5715487","Type":"ContainerDied","Data":"d6fd0474a0143aa44ea872486520519acb6d1eda44e8edd66128de36702edd56"} Feb 16 11:29:17 crc kubenswrapper[4949]: I0216 11:29:17.724667 4949 generic.go:334] "Generic (PLEG): container finished" podID="25f93b07-c059-4191-9ad3-7cb42412b2fb" containerID="211324f1a27603e7c9df9b512272f9fd3a3fd9f4fbea9ae533f065a06813c2b8" exitCode=0 Feb 16 11:29:17 crc kubenswrapper[4949]: I0216 11:29:17.724762 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-29db-account-create-update-sk7mb" event={"ID":"25f93b07-c059-4191-9ad3-7cb42412b2fb","Type":"ContainerDied","Data":"211324f1a27603e7c9df9b512272f9fd3a3fd9f4fbea9ae533f065a06813c2b8"} Feb 16 11:29:17 crc kubenswrapper[4949]: I0216 11:29:17.734862 4949 generic.go:334] "Generic (PLEG): container finished" podID="0686a1d4-34d8-48ed-8e05-14f6e2b65462" containerID="46bef3d44c1e5dd8b60fc72b6cafe5174aac8067729859e639452edd51a24d2e" exitCode=0 Feb 16 11:29:17 crc kubenswrapper[4949]: I0216 11:29:17.734961 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-w6p2b" event={"ID":"0686a1d4-34d8-48ed-8e05-14f6e2b65462","Type":"ContainerDied","Data":"46bef3d44c1e5dd8b60fc72b6cafe5174aac8067729859e639452edd51a24d2e"} Feb 16 11:29:17 crc kubenswrapper[4949]: I0216 11:29:17.739674 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-db-create-mfr7w" event={"ID":"9e3f4b81-c950-440a-a85a-6769c6affd08","Type":"ContainerStarted","Data":"c0a3627ba59562c193e274d54687aa2174f0f37c0bc3e3fe369a40d373e717c5"} Feb 16 11:29:17 crc kubenswrapper[4949]: I0216 11:29:17.754183 4949 generic.go:334] "Generic (PLEG): container finished" podID="9cba7e57-5348-4b72-bf94-43da9a6a504d" containerID="cd2623268180de8b4383b823158db3c698c76e8cfb0120e6e2ea8cd841746ae4" exitCode=0 Feb 16 11:29:17 crc kubenswrapper[4949]: I0216 11:29:17.754257 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-kslw5" 
event={"ID":"9cba7e57-5348-4b72-bf94-43da9a6a504d","Type":"ContainerDied","Data":"cd2623268180de8b4383b823158db3c698c76e8cfb0120e6e2ea8cd841746ae4"} Feb 16 11:29:17 crc kubenswrapper[4949]: I0216 11:29:17.754285 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-kslw5" event={"ID":"9cba7e57-5348-4b72-bf94-43da9a6a504d","Type":"ContainerDied","Data":"88969d4d9c7d40b6e8b64b25a2e172d560714fe74140367390ee092af0acc553"} Feb 16 11:29:17 crc kubenswrapper[4949]: I0216 11:29:17.754297 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="88969d4d9c7d40b6e8b64b25a2e172d560714fe74140367390ee092af0acc553" Feb 16 11:29:17 crc kubenswrapper[4949]: I0216 11:29:17.755607 4949 generic.go:334] "Generic (PLEG): container finished" podID="0fbeb10d-7447-462b-8a2b-b68d715a7d9e" containerID="361c9940063923e9aed905955978713e646bee3372d7ee732af6a0fe7cffdf45" exitCode=0 Feb 16 11:29:17 crc kubenswrapper[4949]: I0216 11:29:17.755667 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-p2zsr" event={"ID":"0fbeb10d-7447-462b-8a2b-b68d715a7d9e","Type":"ContainerDied","Data":"361c9940063923e9aed905955978713e646bee3372d7ee732af6a0fe7cffdf45"} Feb 16 11:29:17 crc kubenswrapper[4949]: I0216 11:29:17.758466 4949 generic.go:334] "Generic (PLEG): container finished" podID="8068ac9c-ae28-47f4-af20-1565bb98c28f" containerID="6ce5db53543acedd8939b98845a0bd6fad4f7b3ec6a77cad1fd22ce42423f158" exitCode=0 Feb 16 11:29:17 crc kubenswrapper[4949]: I0216 11:29:17.758622 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-1a86-account-create-update-2sw8c" event={"ID":"8068ac9c-ae28-47f4-af20-1565bb98c28f","Type":"ContainerDied","Data":"6ce5db53543acedd8939b98845a0bd6fad4f7b3ec6a77cad1fd22ce42423f158"} Feb 16 11:29:17 crc kubenswrapper[4949]: I0216 11:29:17.795346 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-ft6d5" event={"ID":"bac7946d-0613-402a-911d-a3a7f2ca6d5e","Type":"ContainerDied","Data":"a61d0423d91bc9fb4f0c01ad370e49ca9246cc103ee0d4ab33cbf2ee620d6678"} Feb 16 11:29:17 crc kubenswrapper[4949]: I0216 11:29:17.795388 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a61d0423d91bc9fb4f0c01ad370e49ca9246cc103ee0d4ab33cbf2ee620d6678" Feb 16 11:29:17 crc kubenswrapper[4949]: I0216 11:29:17.795466 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-ft6d5" Feb 16 11:29:17 crc kubenswrapper[4949]: I0216 11:29:17.808963 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-kslw5" Feb 16 11:29:17 crc kubenswrapper[4949]: I0216 11:29:17.816557 4949 generic.go:334] "Generic (PLEG): container finished" podID="d81d6ce5-367a-40d2-af0c-c2e88c48bc4e" containerID="ef2dd86f5dc586e7f1d85040d39b311aa3092def01e38437226c686f54dbb323" exitCode=0 Feb 16 11:29:17 crc kubenswrapper[4949]: I0216 11:29:17.816976 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-43c4-account-create-update-jnnqw" event={"ID":"d81d6ce5-367a-40d2-af0c-c2e88c48bc4e","Type":"ContainerDied","Data":"ef2dd86f5dc586e7f1d85040d39b311aa3092def01e38437226c686f54dbb323"} Feb 16 11:29:17 crc kubenswrapper[4949]: I0216 11:29:17.924764 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9cba7e57-5348-4b72-bf94-43da9a6a504d-dns-svc\") pod \"9cba7e57-5348-4b72-bf94-43da9a6a504d\" (UID: \"9cba7e57-5348-4b72-bf94-43da9a6a504d\") " Feb 16 11:29:17 crc kubenswrapper[4949]: I0216 11:29:17.930295 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9cba7e57-5348-4b72-bf94-43da9a6a504d-ovsdbserver-nb\") pod \"9cba7e57-5348-4b72-bf94-43da9a6a504d\" (UID: \"9cba7e57-5348-4b72-bf94-43da9a6a504d\") " Feb 16 11:29:17 crc kubenswrapper[4949]: I0216 11:29:17.930604 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9cba7e57-5348-4b72-bf94-43da9a6a504d-config\") pod \"9cba7e57-5348-4b72-bf94-43da9a6a504d\" (UID: \"9cba7e57-5348-4b72-bf94-43da9a6a504d\") " Feb 16 11:29:17 crc kubenswrapper[4949]: I0216 11:29:17.930651 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l44vw\" (UniqueName: \"kubernetes.io/projected/9cba7e57-5348-4b72-bf94-43da9a6a504d-kube-api-access-l44vw\") pod \"9cba7e57-5348-4b72-bf94-43da9a6a504d\" (UID: \"9cba7e57-5348-4b72-bf94-43da9a6a504d\") " Feb 16 11:29:17 crc kubenswrapper[4949]: I0216 11:29:17.930685 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9cba7e57-5348-4b72-bf94-43da9a6a504d-ovsdbserver-sb\") pod \"9cba7e57-5348-4b72-bf94-43da9a6a504d\" (UID: \"9cba7e57-5348-4b72-bf94-43da9a6a504d\") " Feb 16 11:29:17 crc kubenswrapper[4949]: I0216 11:29:17.938882 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9cba7e57-5348-4b72-bf94-43da9a6a504d-kube-api-access-l44vw" (OuterVolumeSpecName: "kube-api-access-l44vw") pod "9cba7e57-5348-4b72-bf94-43da9a6a504d" (UID: "9cba7e57-5348-4b72-bf94-43da9a6a504d"). InnerVolumeSpecName "kube-api-access-l44vw". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:29:18 crc kubenswrapper[4949]: I0216 11:29:18.033683 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l44vw\" (UniqueName: \"kubernetes.io/projected/9cba7e57-5348-4b72-bf94-43da9a6a504d-kube-api-access-l44vw\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:18 crc kubenswrapper[4949]: I0216 11:29:18.044825 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9cba7e57-5348-4b72-bf94-43da9a6a504d-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "9cba7e57-5348-4b72-bf94-43da9a6a504d" (UID: "9cba7e57-5348-4b72-bf94-43da9a6a504d"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:29:18 crc kubenswrapper[4949]: I0216 11:29:18.048038 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9cba7e57-5348-4b72-bf94-43da9a6a504d-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "9cba7e57-5348-4b72-bf94-43da9a6a504d" (UID: "9cba7e57-5348-4b72-bf94-43da9a6a504d"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:29:18 crc kubenswrapper[4949]: I0216 11:29:18.054878 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9cba7e57-5348-4b72-bf94-43da9a6a504d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "9cba7e57-5348-4b72-bf94-43da9a6a504d" (UID: "9cba7e57-5348-4b72-bf94-43da9a6a504d"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:29:18 crc kubenswrapper[4949]: I0216 11:29:18.073969 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9cba7e57-5348-4b72-bf94-43da9a6a504d-config" (OuterVolumeSpecName: "config") pod "9cba7e57-5348-4b72-bf94-43da9a6a504d" (UID: "9cba7e57-5348-4b72-bf94-43da9a6a504d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:29:18 crc kubenswrapper[4949]: I0216 11:29:18.137390 4949 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9cba7e57-5348-4b72-bf94-43da9a6a504d-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:18 crc kubenswrapper[4949]: I0216 11:29:18.137444 4949 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9cba7e57-5348-4b72-bf94-43da9a6a504d-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:18 crc kubenswrapper[4949]: I0216 11:29:18.137482 4949 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9cba7e57-5348-4b72-bf94-43da9a6a504d-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:18 crc kubenswrapper[4949]: I0216 11:29:18.137494 4949 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9cba7e57-5348-4b72-bf94-43da9a6a504d-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:18 crc kubenswrapper[4949]: I0216 11:29:18.832983 4949 generic.go:334] "Generic (PLEG): container finished" podID="99deca80-3672-484c-9902-878b7a51ac6a" containerID="779fd297f1a7b349d9a9cf2f7bf3b8e3116c795a103dfadb6d89d0424ef52cd1" exitCode=0 Feb 16 11:29:18 crc kubenswrapper[4949]: I0216 11:29:18.833062 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-ecac-account-create-update-kqvjh" event={"ID":"99deca80-3672-484c-9902-878b7a51ac6a","Type":"ContainerDied","Data":"779fd297f1a7b349d9a9cf2f7bf3b8e3116c795a103dfadb6d89d0424ef52cd1"} Feb 16 11:29:18 crc kubenswrapper[4949]: I0216 11:29:18.833465 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-ecac-account-create-update-kqvjh" event={"ID":"99deca80-3672-484c-9902-878b7a51ac6a","Type":"ContainerStarted","Data":"a7d4e8d7ab03a46f73084dcd97580323cc1a22492741042a12d6eaf702610526"} Feb 16 11:29:18 crc kubenswrapper[4949]: I0216 11:29:18.836194 4949 generic.go:334] "Generic (PLEG): container finished" podID="9e3f4b81-c950-440a-a85a-6769c6affd08" containerID="679e18c9c9c39e98223301f119cf67e2b3940c45f32212ab957d447336da2b17" 
exitCode=0 Feb 16 11:29:18 crc kubenswrapper[4949]: I0216 11:29:18.836296 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-db-create-mfr7w" event={"ID":"9e3f4b81-c950-440a-a85a-6769c6affd08","Type":"ContainerDied","Data":"679e18c9c9c39e98223301f119cf67e2b3940c45f32212ab957d447336da2b17"} Feb 16 11:29:18 crc kubenswrapper[4949]: I0216 11:29:18.836408 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-kslw5" Feb 16 11:29:18 crc kubenswrapper[4949]: I0216 11:29:18.948759 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-kslw5"] Feb 16 11:29:18 crc kubenswrapper[4949]: I0216 11:29:18.979146 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-kslw5"] Feb 16 11:29:19 crc kubenswrapper[4949]: I0216 11:29:19.269080 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9cba7e57-5348-4b72-bf94-43da9a6a504d" path="/var/lib/kubelet/pods/9cba7e57-5348-4b72-bf94-43da9a6a504d/volumes" Feb 16 11:29:19 crc kubenswrapper[4949]: I0216 11:29:19.489866 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-29db-account-create-update-sk7mb" Feb 16 11:29:19 crc kubenswrapper[4949]: I0216 11:29:19.577726 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqc6r\" (UniqueName: \"kubernetes.io/projected/25f93b07-c059-4191-9ad3-7cb42412b2fb-kube-api-access-fqc6r\") pod \"25f93b07-c059-4191-9ad3-7cb42412b2fb\" (UID: \"25f93b07-c059-4191-9ad3-7cb42412b2fb\") " Feb 16 11:29:19 crc kubenswrapper[4949]: I0216 11:29:19.578137 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/25f93b07-c059-4191-9ad3-7cb42412b2fb-operator-scripts\") pod \"25f93b07-c059-4191-9ad3-7cb42412b2fb\" (UID: \"25f93b07-c059-4191-9ad3-7cb42412b2fb\") " Feb 16 11:29:19 crc kubenswrapper[4949]: I0216 11:29:19.579432 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25f93b07-c059-4191-9ad3-7cb42412b2fb-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "25f93b07-c059-4191-9ad3-7cb42412b2fb" (UID: "25f93b07-c059-4191-9ad3-7cb42412b2fb"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:29:19 crc kubenswrapper[4949]: I0216 11:29:19.599204 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25f93b07-c059-4191-9ad3-7cb42412b2fb-kube-api-access-fqc6r" (OuterVolumeSpecName: "kube-api-access-fqc6r") pod "25f93b07-c059-4191-9ad3-7cb42412b2fb" (UID: "25f93b07-c059-4191-9ad3-7cb42412b2fb"). InnerVolumeSpecName "kube-api-access-fqc6r". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:29:19 crc kubenswrapper[4949]: I0216 11:29:19.681677 4949 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/25f93b07-c059-4191-9ad3-7cb42412b2fb-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:19 crc kubenswrapper[4949]: I0216 11:29:19.681715 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqc6r\" (UniqueName: \"kubernetes.io/projected/25f93b07-c059-4191-9ad3-7cb42412b2fb-kube-api-access-fqc6r\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:19 crc kubenswrapper[4949]: I0216 11:29:19.867021 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-29db-account-create-update-sk7mb" event={"ID":"25f93b07-c059-4191-9ad3-7cb42412b2fb","Type":"ContainerDied","Data":"b3466e6dabb67eff8ed90844756b5c4ac5f2b25e160e135f9f5dae7d2e262b5d"} Feb 16 11:29:19 crc kubenswrapper[4949]: I0216 11:29:19.867077 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-29db-account-create-update-sk7mb" Feb 16 11:29:19 crc kubenswrapper[4949]: I0216 11:29:19.867084 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b3466e6dabb67eff8ed90844756b5c4ac5f2b25e160e135f9f5dae7d2e262b5d" Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.014895 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-1a86-account-create-update-2sw8c" Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.048680 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-43c4-account-create-update-jnnqw" Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.055297 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-w6p2b" Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.086961 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-p2zsr" Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.088772 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-vmnzk" Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.089798 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zjpbh\" (UniqueName: \"kubernetes.io/projected/d81d6ce5-367a-40d2-af0c-c2e88c48bc4e-kube-api-access-zjpbh\") pod \"d81d6ce5-367a-40d2-af0c-c2e88c48bc4e\" (UID: \"d81d6ce5-367a-40d2-af0c-c2e88c48bc4e\") " Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.089853 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8068ac9c-ae28-47f4-af20-1565bb98c28f-operator-scripts\") pod \"8068ac9c-ae28-47f4-af20-1565bb98c28f\" (UID: \"8068ac9c-ae28-47f4-af20-1565bb98c28f\") " Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.090048 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p79t6\" (UniqueName: \"kubernetes.io/projected/0686a1d4-34d8-48ed-8e05-14f6e2b65462-kube-api-access-p79t6\") pod \"0686a1d4-34d8-48ed-8e05-14f6e2b65462\" (UID: \"0686a1d4-34d8-48ed-8e05-14f6e2b65462\") " Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.090107 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lr6lx\" (UniqueName: \"kubernetes.io/projected/8068ac9c-ae28-47f4-af20-1565bb98c28f-kube-api-access-lr6lx\") pod \"8068ac9c-ae28-47f4-af20-1565bb98c28f\" (UID: \"8068ac9c-ae28-47f4-af20-1565bb98c28f\") " Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.090186 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d81d6ce5-367a-40d2-af0c-c2e88c48bc4e-operator-scripts\") pod \"d81d6ce5-367a-40d2-af0c-c2e88c48bc4e\" (UID: \"d81d6ce5-367a-40d2-af0c-c2e88c48bc4e\") " Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.090216 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0686a1d4-34d8-48ed-8e05-14f6e2b65462-operator-scripts\") pod \"0686a1d4-34d8-48ed-8e05-14f6e2b65462\" (UID: \"0686a1d4-34d8-48ed-8e05-14f6e2b65462\") " Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.091660 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0686a1d4-34d8-48ed-8e05-14f6e2b65462-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0686a1d4-34d8-48ed-8e05-14f6e2b65462" (UID: "0686a1d4-34d8-48ed-8e05-14f6e2b65462"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.092898 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8068ac9c-ae28-47f4-af20-1565bb98c28f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8068ac9c-ae28-47f4-af20-1565bb98c28f" (UID: "8068ac9c-ae28-47f4-af20-1565bb98c28f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.093128 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d81d6ce5-367a-40d2-af0c-c2e88c48bc4e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d81d6ce5-367a-40d2-af0c-c2e88c48bc4e" (UID: "d81d6ce5-367a-40d2-af0c-c2e88c48bc4e"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.098108 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8068ac9c-ae28-47f4-af20-1565bb98c28f-kube-api-access-lr6lx" (OuterVolumeSpecName: "kube-api-access-lr6lx") pod "8068ac9c-ae28-47f4-af20-1565bb98c28f" (UID: "8068ac9c-ae28-47f4-af20-1565bb98c28f"). InnerVolumeSpecName "kube-api-access-lr6lx". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.107399 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0686a1d4-34d8-48ed-8e05-14f6e2b65462-kube-api-access-p79t6" (OuterVolumeSpecName: "kube-api-access-p79t6") pod "0686a1d4-34d8-48ed-8e05-14f6e2b65462" (UID: "0686a1d4-34d8-48ed-8e05-14f6e2b65462"). InnerVolumeSpecName "kube-api-access-p79t6". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.116710 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d81d6ce5-367a-40d2-af0c-c2e88c48bc4e-kube-api-access-zjpbh" (OuterVolumeSpecName: "kube-api-access-zjpbh") pod "d81d6ce5-367a-40d2-af0c-c2e88c48bc4e" (UID: "d81d6ce5-367a-40d2-af0c-c2e88c48bc4e"). InnerVolumeSpecName "kube-api-access-zjpbh". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.191946 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-94qdb\" (UniqueName: \"kubernetes.io/projected/7118450d-767c-4539-801a-2ab0b5715487-kube-api-access-94qdb\") pod \"7118450d-767c-4539-801a-2ab0b5715487\" (UID: \"7118450d-767c-4539-801a-2ab0b5715487\") " Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.192086 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0fbeb10d-7447-462b-8a2b-b68d715a7d9e-operator-scripts\") pod \"0fbeb10d-7447-462b-8a2b-b68d715a7d9e\" (UID: \"0fbeb10d-7447-462b-8a2b-b68d715a7d9e\") " Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.192132 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tlqlr\" (UniqueName: \"kubernetes.io/projected/0fbeb10d-7447-462b-8a2b-b68d715a7d9e-kube-api-access-tlqlr\") pod \"0fbeb10d-7447-462b-8a2b-b68d715a7d9e\" (UID: \"0fbeb10d-7447-462b-8a2b-b68d715a7d9e\") " Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.192608 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7118450d-767c-4539-801a-2ab0b5715487-operator-scripts\") pod \"7118450d-767c-4539-801a-2ab0b5715487\" (UID: \"7118450d-767c-4539-801a-2ab0b5715487\") " Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.193247 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0fbeb10d-7447-462b-8a2b-b68d715a7d9e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0fbeb10d-7447-462b-8a2b-b68d715a7d9e" (UID: "0fbeb10d-7447-462b-8a2b-b68d715a7d9e"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.193433 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7118450d-767c-4539-801a-2ab0b5715487-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7118450d-767c-4539-801a-2ab0b5715487" (UID: "7118450d-767c-4539-801a-2ab0b5715487"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.193729 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zjpbh\" (UniqueName: \"kubernetes.io/projected/d81d6ce5-367a-40d2-af0c-c2e88c48bc4e-kube-api-access-zjpbh\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.193758 4949 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8068ac9c-ae28-47f4-af20-1565bb98c28f-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.193770 4949 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7118450d-767c-4539-801a-2ab0b5715487-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.193783 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p79t6\" (UniqueName: \"kubernetes.io/projected/0686a1d4-34d8-48ed-8e05-14f6e2b65462-kube-api-access-p79t6\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.193795 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lr6lx\" (UniqueName: \"kubernetes.io/projected/8068ac9c-ae28-47f4-af20-1565bb98c28f-kube-api-access-lr6lx\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.193806 4949 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d81d6ce5-367a-40d2-af0c-c2e88c48bc4e-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.193819 4949 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0686a1d4-34d8-48ed-8e05-14f6e2b65462-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.193833 4949 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0fbeb10d-7447-462b-8a2b-b68d715a7d9e-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.203049 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7118450d-767c-4539-801a-2ab0b5715487-kube-api-access-94qdb" (OuterVolumeSpecName: "kube-api-access-94qdb") pod "7118450d-767c-4539-801a-2ab0b5715487" (UID: "7118450d-767c-4539-801a-2ab0b5715487"). InnerVolumeSpecName "kube-api-access-94qdb". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.209099 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0fbeb10d-7447-462b-8a2b-b68d715a7d9e-kube-api-access-tlqlr" (OuterVolumeSpecName: "kube-api-access-tlqlr") pod "0fbeb10d-7447-462b-8a2b-b68d715a7d9e" (UID: "0fbeb10d-7447-462b-8a2b-b68d715a7d9e"). 
InnerVolumeSpecName "kube-api-access-tlqlr". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.296548 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-94qdb\" (UniqueName: \"kubernetes.io/projected/7118450d-767c-4539-801a-2ab0b5715487-kube-api-access-94qdb\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.296586 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tlqlr\" (UniqueName: \"kubernetes.io/projected/0fbeb10d-7447-462b-8a2b-b68d715a7d9e-kube-api-access-tlqlr\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.385466 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-ecac-account-create-update-kqvjh" Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.398506 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/99deca80-3672-484c-9902-878b7a51ac6a-operator-scripts\") pod \"99deca80-3672-484c-9902-878b7a51ac6a\" (UID: \"99deca80-3672-484c-9902-878b7a51ac6a\") " Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.398749 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wmpz7\" (UniqueName: \"kubernetes.io/projected/99deca80-3672-484c-9902-878b7a51ac6a-kube-api-access-wmpz7\") pod \"99deca80-3672-484c-9902-878b7a51ac6a\" (UID: \"99deca80-3672-484c-9902-878b7a51ac6a\") " Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.399936 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/99deca80-3672-484c-9902-878b7a51ac6a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "99deca80-3672-484c-9902-878b7a51ac6a" (UID: "99deca80-3672-484c-9902-878b7a51ac6a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.402814 4949 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/99deca80-3672-484c-9902-878b7a51ac6a-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.407048 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-mfr7w" Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.407254 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/99deca80-3672-484c-9902-878b7a51ac6a-kube-api-access-wmpz7" (OuterVolumeSpecName: "kube-api-access-wmpz7") pod "99deca80-3672-484c-9902-878b7a51ac6a" (UID: "99deca80-3672-484c-9902-878b7a51ac6a"). InnerVolumeSpecName "kube-api-access-wmpz7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.504626 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qcfkg\" (UniqueName: \"kubernetes.io/projected/9e3f4b81-c950-440a-a85a-6769c6affd08-kube-api-access-qcfkg\") pod \"9e3f4b81-c950-440a-a85a-6769c6affd08\" (UID: \"9e3f4b81-c950-440a-a85a-6769c6affd08\") " Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.505133 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9e3f4b81-c950-440a-a85a-6769c6affd08-operator-scripts\") pod \"9e3f4b81-c950-440a-a85a-6769c6affd08\" (UID: \"9e3f4b81-c950-440a-a85a-6769c6affd08\") " Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.506728 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wmpz7\" (UniqueName: \"kubernetes.io/projected/99deca80-3672-484c-9902-878b7a51ac6a-kube-api-access-wmpz7\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.507125 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9e3f4b81-c950-440a-a85a-6769c6affd08-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9e3f4b81-c950-440a-a85a-6769c6affd08" (UID: "9e3f4b81-c950-440a-a85a-6769c6affd08"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.507852 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9e3f4b81-c950-440a-a85a-6769c6affd08-kube-api-access-qcfkg" (OuterVolumeSpecName: "kube-api-access-qcfkg") pod "9e3f4b81-c950-440a-a85a-6769c6affd08" (UID: "9e3f4b81-c950-440a-a85a-6769c6affd08"). InnerVolumeSpecName "kube-api-access-qcfkg". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.608956 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qcfkg\" (UniqueName: \"kubernetes.io/projected/9e3f4b81-c950-440a-a85a-6769c6affd08-kube-api-access-qcfkg\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.609004 4949 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9e3f4b81-c950-440a-a85a-6769c6affd08-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.882088 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-p2zsr" event={"ID":"0fbeb10d-7447-462b-8a2b-b68d715a7d9e","Type":"ContainerDied","Data":"562124f518d83138a18aed17c259321105d7ea8b49d43a5a8e4813d5f97d908d"} Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.884451 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="562124f518d83138a18aed17c259321105d7ea8b49d43a5a8e4813d5f97d908d" Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.884405 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-p2zsr" Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.887420 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-1a86-account-create-update-2sw8c" event={"ID":"8068ac9c-ae28-47f4-af20-1565bb98c28f","Type":"ContainerDied","Data":"5242237625d42cbb05f78cf6fb2759bd386f641b187276bbebf8978fde5e9d7b"} Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.887471 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5242237625d42cbb05f78cf6fb2759bd386f641b187276bbebf8978fde5e9d7b" Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.887539 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-1a86-account-create-update-2sw8c" Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.894248 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-w6p2b" event={"ID":"0686a1d4-34d8-48ed-8e05-14f6e2b65462","Type":"ContainerDied","Data":"1ada10bed2f39693c40f6cc764235dda6e91c4b41b6066977be8d14b20215646"} Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.894310 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1ada10bed2f39693c40f6cc764235dda6e91c4b41b6066977be8d14b20215646" Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.894397 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-w6p2b" Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.897618 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-43c4-account-create-update-jnnqw" event={"ID":"d81d6ce5-367a-40d2-af0c-c2e88c48bc4e","Type":"ContainerDied","Data":"44e0739020ece18240822411c594e7b8d4196a70a383f4e414628cceebd75180"} Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.897644 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-43c4-account-create-update-jnnqw" Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.897660 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="44e0739020ece18240822411c594e7b8d4196a70a383f4e414628cceebd75180" Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.902775 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-ecac-account-create-update-kqvjh" event={"ID":"99deca80-3672-484c-9902-878b7a51ac6a","Type":"ContainerDied","Data":"a7d4e8d7ab03a46f73084dcd97580323cc1a22492741042a12d6eaf702610526"} Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.902791 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a7d4e8d7ab03a46f73084dcd97580323cc1a22492741042a12d6eaf702610526" Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.902824 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-ecac-account-create-update-kqvjh" Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.912797 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-db-create-mfr7w" event={"ID":"9e3f4b81-c950-440a-a85a-6769c6affd08","Type":"ContainerDied","Data":"c0a3627ba59562c193e274d54687aa2174f0f37c0bc3e3fe369a40d373e717c5"} Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.912839 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c0a3627ba59562c193e274d54687aa2174f0f37c0bc3e3fe369a40d373e717c5" Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.912803 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-mfr7w" Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.916224 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-vmnzk" event={"ID":"7118450d-767c-4539-801a-2ab0b5715487","Type":"ContainerDied","Data":"c90ee7a5a2eb07e5eacf4f566446de6cccbc607881c54b56f1b4eb549614d131"} Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.916248 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c90ee7a5a2eb07e5eacf4f566446de6cccbc607881c54b56f1b4eb549614d131" Feb 16 11:29:20 crc kubenswrapper[4949]: I0216 11:29:20.916287 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-vmnzk" Feb 16 11:29:22 crc kubenswrapper[4949]: I0216 11:29:22.154743 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-ft6d5"] Feb 16 11:29:22 crc kubenswrapper[4949]: I0216 11:29:22.172707 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-ft6d5"] Feb 16 11:29:23 crc kubenswrapper[4949]: I0216 11:29:23.248153 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bac7946d-0613-402a-911d-a3a7f2ca6d5e" path="/var/lib/kubelet/pods/bac7946d-0613-402a-911d-a3a7f2ca6d5e/volumes" Feb 16 11:29:23 crc kubenswrapper[4949]: I0216 11:29:23.693586 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b8fafaac-cbaa-4726-91b8-b0739034455f-etc-swift\") pod \"swift-storage-0\" (UID: \"b8fafaac-cbaa-4726-91b8-b0739034455f\") " pod="openstack/swift-storage-0" Feb 16 11:29:23 crc kubenswrapper[4949]: I0216 11:29:23.701994 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b8fafaac-cbaa-4726-91b8-b0739034455f-etc-swift\") pod \"swift-storage-0\" (UID: \"b8fafaac-cbaa-4726-91b8-b0739034455f\") " pod="openstack/swift-storage-0" Feb 16 11:29:23 crc kubenswrapper[4949]: I0216 11:29:23.722162 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Feb 16 11:29:23 crc kubenswrapper[4949]: I0216 11:29:23.951476 4949 generic.go:334] "Generic (PLEG): container finished" podID="3aa21f38-ab8c-47b9-9ef0-f879e28eb01f" containerID="e3f23244327b9e5c8410f1c2b2ec950a72580cedcec271f4603fc1adcb2aaeaf" exitCode=0 Feb 16 11:29:23 crc kubenswrapper[4949]: I0216 11:29:23.951523 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-nvcsr" event={"ID":"3aa21f38-ab8c-47b9-9ef0-f879e28eb01f","Type":"ContainerDied","Data":"e3f23244327b9e5c8410f1c2b2ec950a72580cedcec271f4603fc1adcb2aaeaf"} Feb 16 11:29:24 crc kubenswrapper[4949]: I0216 11:29:24.331209 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-clw7g" podUID="dea77106-f4b1-4515-80bb-ebad1a6effcf" containerName="ovn-controller" probeResult="failure" output=< Feb 16 11:29:24 crc kubenswrapper[4949]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Feb 16 11:29:24 crc kubenswrapper[4949]: > Feb 16 11:29:24 crc kubenswrapper[4949]: I0216 11:29:24.394900 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-fbgr9" Feb 16 11:29:24 crc kubenswrapper[4949]: I0216 11:29:24.395945 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-fbgr9" Feb 16 11:29:24 crc kubenswrapper[4949]: I0216 11:29:24.813378 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-clw7g-config-qk469"] Feb 16 11:29:24 crc kubenswrapper[4949]: E0216 11:29:24.813897 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0686a1d4-34d8-48ed-8e05-14f6e2b65462" containerName="mariadb-database-create" Feb 16 11:29:24 crc kubenswrapper[4949]: I0216 11:29:24.813917 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="0686a1d4-34d8-48ed-8e05-14f6e2b65462" containerName="mariadb-database-create" Feb 16 11:29:24 crc kubenswrapper[4949]: E0216 11:29:24.813930 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e3f4b81-c950-440a-a85a-6769c6affd08" containerName="mariadb-database-create" Feb 16 11:29:24 crc kubenswrapper[4949]: I0216 11:29:24.813938 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e3f4b81-c950-440a-a85a-6769c6affd08" containerName="mariadb-database-create" Feb 16 11:29:24 crc kubenswrapper[4949]: E0216 11:29:24.813952 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0fbeb10d-7447-462b-8a2b-b68d715a7d9e" containerName="mariadb-database-create" Feb 16 11:29:24 crc kubenswrapper[4949]: I0216 11:29:24.813960 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="0fbeb10d-7447-462b-8a2b-b68d715a7d9e" containerName="mariadb-database-create" Feb 16 11:29:24 crc kubenswrapper[4949]: E0216 11:29:24.813969 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bac7946d-0613-402a-911d-a3a7f2ca6d5e" containerName="mariadb-account-create-update" Feb 16 11:29:24 crc kubenswrapper[4949]: I0216 11:29:24.813977 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="bac7946d-0613-402a-911d-a3a7f2ca6d5e" containerName="mariadb-account-create-update" Feb 16 11:29:24 crc kubenswrapper[4949]: E0216 11:29:24.813983 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8068ac9c-ae28-47f4-af20-1565bb98c28f" containerName="mariadb-account-create-update" Feb 16 11:29:24 crc kubenswrapper[4949]: I0216 11:29:24.813992 4949 state_mem.go:107] 
"Deleted CPUSet assignment" podUID="8068ac9c-ae28-47f4-af20-1565bb98c28f" containerName="mariadb-account-create-update" Feb 16 11:29:24 crc kubenswrapper[4949]: E0216 11:29:24.814013 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d81d6ce5-367a-40d2-af0c-c2e88c48bc4e" containerName="mariadb-account-create-update" Feb 16 11:29:24 crc kubenswrapper[4949]: I0216 11:29:24.814020 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="d81d6ce5-367a-40d2-af0c-c2e88c48bc4e" containerName="mariadb-account-create-update" Feb 16 11:29:24 crc kubenswrapper[4949]: E0216 11:29:24.814028 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="99deca80-3672-484c-9902-878b7a51ac6a" containerName="mariadb-account-create-update" Feb 16 11:29:24 crc kubenswrapper[4949]: I0216 11:29:24.814038 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="99deca80-3672-484c-9902-878b7a51ac6a" containerName="mariadb-account-create-update" Feb 16 11:29:24 crc kubenswrapper[4949]: E0216 11:29:24.814051 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7118450d-767c-4539-801a-2ab0b5715487" containerName="mariadb-database-create" Feb 16 11:29:24 crc kubenswrapper[4949]: I0216 11:29:24.814057 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="7118450d-767c-4539-801a-2ab0b5715487" containerName="mariadb-database-create" Feb 16 11:29:24 crc kubenswrapper[4949]: E0216 11:29:24.814073 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25f93b07-c059-4191-9ad3-7cb42412b2fb" containerName="mariadb-account-create-update" Feb 16 11:29:24 crc kubenswrapper[4949]: I0216 11:29:24.814079 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="25f93b07-c059-4191-9ad3-7cb42412b2fb" containerName="mariadb-account-create-update" Feb 16 11:29:24 crc kubenswrapper[4949]: E0216 11:29:24.814096 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9cba7e57-5348-4b72-bf94-43da9a6a504d" containerName="init" Feb 16 11:29:24 crc kubenswrapper[4949]: I0216 11:29:24.814102 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="9cba7e57-5348-4b72-bf94-43da9a6a504d" containerName="init" Feb 16 11:29:24 crc kubenswrapper[4949]: E0216 11:29:24.814118 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9cba7e57-5348-4b72-bf94-43da9a6a504d" containerName="dnsmasq-dns" Feb 16 11:29:24 crc kubenswrapper[4949]: I0216 11:29:24.814125 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="9cba7e57-5348-4b72-bf94-43da9a6a504d" containerName="dnsmasq-dns" Feb 16 11:29:24 crc kubenswrapper[4949]: I0216 11:29:24.814335 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="d81d6ce5-367a-40d2-af0c-c2e88c48bc4e" containerName="mariadb-account-create-update" Feb 16 11:29:24 crc kubenswrapper[4949]: I0216 11:29:24.814347 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="8068ac9c-ae28-47f4-af20-1565bb98c28f" containerName="mariadb-account-create-update" Feb 16 11:29:24 crc kubenswrapper[4949]: I0216 11:29:24.814360 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="99deca80-3672-484c-9902-878b7a51ac6a" containerName="mariadb-account-create-update" Feb 16 11:29:24 crc kubenswrapper[4949]: I0216 11:29:24.814372 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="0686a1d4-34d8-48ed-8e05-14f6e2b65462" containerName="mariadb-database-create" Feb 16 11:29:24 crc kubenswrapper[4949]: I0216 11:29:24.814381 4949 memory_manager.go:354] "RemoveStaleState removing 
state" podUID="bac7946d-0613-402a-911d-a3a7f2ca6d5e" containerName="mariadb-account-create-update" Feb 16 11:29:24 crc kubenswrapper[4949]: I0216 11:29:24.814392 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="25f93b07-c059-4191-9ad3-7cb42412b2fb" containerName="mariadb-account-create-update" Feb 16 11:29:24 crc kubenswrapper[4949]: I0216 11:29:24.814406 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="9cba7e57-5348-4b72-bf94-43da9a6a504d" containerName="dnsmasq-dns" Feb 16 11:29:24 crc kubenswrapper[4949]: I0216 11:29:24.814579 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="9e3f4b81-c950-440a-a85a-6769c6affd08" containerName="mariadb-database-create" Feb 16 11:29:24 crc kubenswrapper[4949]: I0216 11:29:24.814587 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="0fbeb10d-7447-462b-8a2b-b68d715a7d9e" containerName="mariadb-database-create" Feb 16 11:29:24 crc kubenswrapper[4949]: I0216 11:29:24.814602 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="7118450d-767c-4539-801a-2ab0b5715487" containerName="mariadb-database-create" Feb 16 11:29:24 crc kubenswrapper[4949]: I0216 11:29:24.830058 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-clw7g-config-qk469" Feb 16 11:29:24 crc kubenswrapper[4949]: I0216 11:29:24.836749 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Feb 16 11:29:24 crc kubenswrapper[4949]: I0216 11:29:24.842928 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-clw7g-config-qk469"] Feb 16 11:29:24 crc kubenswrapper[4949]: I0216 11:29:24.878839 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-lwh9x"] Feb 16 11:29:24 crc kubenswrapper[4949]: I0216 11:29:24.880707 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-lwh9x" Feb 16 11:29:24 crc kubenswrapper[4949]: I0216 11:29:24.885000 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Feb 16 11:29:24 crc kubenswrapper[4949]: I0216 11:29:24.885870 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-76nfs" Feb 16 11:29:24 crc kubenswrapper[4949]: I0216 11:29:24.903456 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-lwh9x"] Feb 16 11:29:24 crc kubenswrapper[4949]: I0216 11:29:24.927077 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tpq4v\" (UniqueName: \"kubernetes.io/projected/2a271149-395e-4b79-8480-5c146509a20f-kube-api-access-tpq4v\") pod \"ovn-controller-clw7g-config-qk469\" (UID: \"2a271149-395e-4b79-8480-5c146509a20f\") " pod="openstack/ovn-controller-clw7g-config-qk469" Feb 16 11:29:24 crc kubenswrapper[4949]: I0216 11:29:24.927235 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/2a271149-395e-4b79-8480-5c146509a20f-var-log-ovn\") pod \"ovn-controller-clw7g-config-qk469\" (UID: \"2a271149-395e-4b79-8480-5c146509a20f\") " pod="openstack/ovn-controller-clw7g-config-qk469" Feb 16 11:29:24 crc kubenswrapper[4949]: I0216 11:29:24.927272 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/2a271149-395e-4b79-8480-5c146509a20f-additional-scripts\") pod \"ovn-controller-clw7g-config-qk469\" (UID: \"2a271149-395e-4b79-8480-5c146509a20f\") " pod="openstack/ovn-controller-clw7g-config-qk469" Feb 16 11:29:24 crc kubenswrapper[4949]: I0216 11:29:24.927318 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ffc1202a-a99b-4190-984b-511f9d345832-db-sync-config-data\") pod \"glance-db-sync-lwh9x\" (UID: \"ffc1202a-a99b-4190-984b-511f9d345832\") " pod="openstack/glance-db-sync-lwh9x" Feb 16 11:29:24 crc kubenswrapper[4949]: I0216 11:29:24.927344 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffc1202a-a99b-4190-984b-511f9d345832-config-data\") pod \"glance-db-sync-lwh9x\" (UID: \"ffc1202a-a99b-4190-984b-511f9d345832\") " pod="openstack/glance-db-sync-lwh9x" Feb 16 11:29:24 crc kubenswrapper[4949]: I0216 11:29:24.927380 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/2a271149-395e-4b79-8480-5c146509a20f-var-run-ovn\") pod \"ovn-controller-clw7g-config-qk469\" (UID: \"2a271149-395e-4b79-8480-5c146509a20f\") " pod="openstack/ovn-controller-clw7g-config-qk469" Feb 16 11:29:24 crc kubenswrapper[4949]: I0216 11:29:24.927419 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffc1202a-a99b-4190-984b-511f9d345832-combined-ca-bundle\") pod \"glance-db-sync-lwh9x\" (UID: \"ffc1202a-a99b-4190-984b-511f9d345832\") " pod="openstack/glance-db-sync-lwh9x" Feb 16 11:29:24 crc kubenswrapper[4949]: I0216 11:29:24.927572 4949 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2a271149-395e-4b79-8480-5c146509a20f-scripts\") pod \"ovn-controller-clw7g-config-qk469\" (UID: \"2a271149-395e-4b79-8480-5c146509a20f\") " pod="openstack/ovn-controller-clw7g-config-qk469" Feb 16 11:29:24 crc kubenswrapper[4949]: I0216 11:29:24.927677 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xwgzw\" (UniqueName: \"kubernetes.io/projected/ffc1202a-a99b-4190-984b-511f9d345832-kube-api-access-xwgzw\") pod \"glance-db-sync-lwh9x\" (UID: \"ffc1202a-a99b-4190-984b-511f9d345832\") " pod="openstack/glance-db-sync-lwh9x" Feb 16 11:29:24 crc kubenswrapper[4949]: I0216 11:29:24.927781 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/2a271149-395e-4b79-8480-5c146509a20f-var-run\") pod \"ovn-controller-clw7g-config-qk469\" (UID: \"2a271149-395e-4b79-8480-5c146509a20f\") " pod="openstack/ovn-controller-clw7g-config-qk469" Feb 16 11:29:25 crc kubenswrapper[4949]: I0216 11:29:25.030360 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tpq4v\" (UniqueName: \"kubernetes.io/projected/2a271149-395e-4b79-8480-5c146509a20f-kube-api-access-tpq4v\") pod \"ovn-controller-clw7g-config-qk469\" (UID: \"2a271149-395e-4b79-8480-5c146509a20f\") " pod="openstack/ovn-controller-clw7g-config-qk469" Feb 16 11:29:25 crc kubenswrapper[4949]: I0216 11:29:25.030526 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/2a271149-395e-4b79-8480-5c146509a20f-var-log-ovn\") pod \"ovn-controller-clw7g-config-qk469\" (UID: \"2a271149-395e-4b79-8480-5c146509a20f\") " pod="openstack/ovn-controller-clw7g-config-qk469" Feb 16 11:29:25 crc kubenswrapper[4949]: I0216 11:29:25.030564 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/2a271149-395e-4b79-8480-5c146509a20f-additional-scripts\") pod \"ovn-controller-clw7g-config-qk469\" (UID: \"2a271149-395e-4b79-8480-5c146509a20f\") " pod="openstack/ovn-controller-clw7g-config-qk469" Feb 16 11:29:25 crc kubenswrapper[4949]: I0216 11:29:25.030643 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffc1202a-a99b-4190-984b-511f9d345832-config-data\") pod \"glance-db-sync-lwh9x\" (UID: \"ffc1202a-a99b-4190-984b-511f9d345832\") " pod="openstack/glance-db-sync-lwh9x" Feb 16 11:29:25 crc kubenswrapper[4949]: I0216 11:29:25.030673 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ffc1202a-a99b-4190-984b-511f9d345832-db-sync-config-data\") pod \"glance-db-sync-lwh9x\" (UID: \"ffc1202a-a99b-4190-984b-511f9d345832\") " pod="openstack/glance-db-sync-lwh9x" Feb 16 11:29:25 crc kubenswrapper[4949]: I0216 11:29:25.030702 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/2a271149-395e-4b79-8480-5c146509a20f-var-run-ovn\") pod \"ovn-controller-clw7g-config-qk469\" (UID: \"2a271149-395e-4b79-8480-5c146509a20f\") " pod="openstack/ovn-controller-clw7g-config-qk469" Feb 16 11:29:25 crc kubenswrapper[4949]: I0216 11:29:25.030747 4949 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffc1202a-a99b-4190-984b-511f9d345832-combined-ca-bundle\") pod \"glance-db-sync-lwh9x\" (UID: \"ffc1202a-a99b-4190-984b-511f9d345832\") " pod="openstack/glance-db-sync-lwh9x" Feb 16 11:29:25 crc kubenswrapper[4949]: I0216 11:29:25.030788 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2a271149-395e-4b79-8480-5c146509a20f-scripts\") pod \"ovn-controller-clw7g-config-qk469\" (UID: \"2a271149-395e-4b79-8480-5c146509a20f\") " pod="openstack/ovn-controller-clw7g-config-qk469" Feb 16 11:29:25 crc kubenswrapper[4949]: I0216 11:29:25.030838 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xwgzw\" (UniqueName: \"kubernetes.io/projected/ffc1202a-a99b-4190-984b-511f9d345832-kube-api-access-xwgzw\") pod \"glance-db-sync-lwh9x\" (UID: \"ffc1202a-a99b-4190-984b-511f9d345832\") " pod="openstack/glance-db-sync-lwh9x" Feb 16 11:29:25 crc kubenswrapper[4949]: I0216 11:29:25.030884 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/2a271149-395e-4b79-8480-5c146509a20f-var-run\") pod \"ovn-controller-clw7g-config-qk469\" (UID: \"2a271149-395e-4b79-8480-5c146509a20f\") " pod="openstack/ovn-controller-clw7g-config-qk469" Feb 16 11:29:25 crc kubenswrapper[4949]: I0216 11:29:25.030940 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/2a271149-395e-4b79-8480-5c146509a20f-var-log-ovn\") pod \"ovn-controller-clw7g-config-qk469\" (UID: \"2a271149-395e-4b79-8480-5c146509a20f\") " pod="openstack/ovn-controller-clw7g-config-qk469" Feb 16 11:29:25 crc kubenswrapper[4949]: I0216 11:29:25.031022 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/2a271149-395e-4b79-8480-5c146509a20f-var-run\") pod \"ovn-controller-clw7g-config-qk469\" (UID: \"2a271149-395e-4b79-8480-5c146509a20f\") " pod="openstack/ovn-controller-clw7g-config-qk469" Feb 16 11:29:25 crc kubenswrapper[4949]: I0216 11:29:25.031027 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/2a271149-395e-4b79-8480-5c146509a20f-var-run-ovn\") pod \"ovn-controller-clw7g-config-qk469\" (UID: \"2a271149-395e-4b79-8480-5c146509a20f\") " pod="openstack/ovn-controller-clw7g-config-qk469" Feb 16 11:29:25 crc kubenswrapper[4949]: I0216 11:29:25.033039 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/2a271149-395e-4b79-8480-5c146509a20f-additional-scripts\") pod \"ovn-controller-clw7g-config-qk469\" (UID: \"2a271149-395e-4b79-8480-5c146509a20f\") " pod="openstack/ovn-controller-clw7g-config-qk469" Feb 16 11:29:25 crc kubenswrapper[4949]: I0216 11:29:25.036503 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2a271149-395e-4b79-8480-5c146509a20f-scripts\") pod \"ovn-controller-clw7g-config-qk469\" (UID: \"2a271149-395e-4b79-8480-5c146509a20f\") " pod="openstack/ovn-controller-clw7g-config-qk469" Feb 16 11:29:25 crc kubenswrapper[4949]: I0216 11:29:25.038057 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/ffc1202a-a99b-4190-984b-511f9d345832-combined-ca-bundle\") pod \"glance-db-sync-lwh9x\" (UID: \"ffc1202a-a99b-4190-984b-511f9d345832\") " pod="openstack/glance-db-sync-lwh9x" Feb 16 11:29:25 crc kubenswrapper[4949]: I0216 11:29:25.056845 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffc1202a-a99b-4190-984b-511f9d345832-config-data\") pod \"glance-db-sync-lwh9x\" (UID: \"ffc1202a-a99b-4190-984b-511f9d345832\") " pod="openstack/glance-db-sync-lwh9x" Feb 16 11:29:25 crc kubenswrapper[4949]: I0216 11:29:25.057072 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ffc1202a-a99b-4190-984b-511f9d345832-db-sync-config-data\") pod \"glance-db-sync-lwh9x\" (UID: \"ffc1202a-a99b-4190-984b-511f9d345832\") " pod="openstack/glance-db-sync-lwh9x" Feb 16 11:29:25 crc kubenswrapper[4949]: I0216 11:29:25.062880 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tpq4v\" (UniqueName: \"kubernetes.io/projected/2a271149-395e-4b79-8480-5c146509a20f-kube-api-access-tpq4v\") pod \"ovn-controller-clw7g-config-qk469\" (UID: \"2a271149-395e-4b79-8480-5c146509a20f\") " pod="openstack/ovn-controller-clw7g-config-qk469" Feb 16 11:29:25 crc kubenswrapper[4949]: I0216 11:29:25.073918 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xwgzw\" (UniqueName: \"kubernetes.io/projected/ffc1202a-a99b-4190-984b-511f9d345832-kube-api-access-xwgzw\") pod \"glance-db-sync-lwh9x\" (UID: \"ffc1202a-a99b-4190-984b-511f9d345832\") " pod="openstack/glance-db-sync-lwh9x" Feb 16 11:29:25 crc kubenswrapper[4949]: I0216 11:29:25.167735 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-clw7g-config-qk469" Feb 16 11:29:25 crc kubenswrapper[4949]: I0216 11:29:25.207588 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-lwh9x" Feb 16 11:29:25 crc kubenswrapper[4949]: I0216 11:29:25.626048 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-nvcsr" Feb 16 11:29:25 crc kubenswrapper[4949]: I0216 11:29:25.748412 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Feb 16 11:29:25 crc kubenswrapper[4949]: I0216 11:29:25.749398 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/3aa21f38-ab8c-47b9-9ef0-f879e28eb01f-swiftconf\") pod \"3aa21f38-ab8c-47b9-9ef0-f879e28eb01f\" (UID: \"3aa21f38-ab8c-47b9-9ef0-f879e28eb01f\") " Feb 16 11:29:25 crc kubenswrapper[4949]: I0216 11:29:25.749507 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3aa21f38-ab8c-47b9-9ef0-f879e28eb01f-combined-ca-bundle\") pod \"3aa21f38-ab8c-47b9-9ef0-f879e28eb01f\" (UID: \"3aa21f38-ab8c-47b9-9ef0-f879e28eb01f\") " Feb 16 11:29:25 crc kubenswrapper[4949]: I0216 11:29:25.749544 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/3aa21f38-ab8c-47b9-9ef0-f879e28eb01f-etc-swift\") pod \"3aa21f38-ab8c-47b9-9ef0-f879e28eb01f\" (UID: \"3aa21f38-ab8c-47b9-9ef0-f879e28eb01f\") " Feb 16 11:29:25 crc kubenswrapper[4949]: I0216 11:29:25.749841 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/3aa21f38-ab8c-47b9-9ef0-f879e28eb01f-ring-data-devices\") pod \"3aa21f38-ab8c-47b9-9ef0-f879e28eb01f\" (UID: \"3aa21f38-ab8c-47b9-9ef0-f879e28eb01f\") " Feb 16 11:29:25 crc kubenswrapper[4949]: I0216 11:29:25.749873 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3aa21f38-ab8c-47b9-9ef0-f879e28eb01f-scripts\") pod \"3aa21f38-ab8c-47b9-9ef0-f879e28eb01f\" (UID: \"3aa21f38-ab8c-47b9-9ef0-f879e28eb01f\") " Feb 16 11:29:25 crc kubenswrapper[4949]: I0216 11:29:25.749907 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-69hxz\" (UniqueName: \"kubernetes.io/projected/3aa21f38-ab8c-47b9-9ef0-f879e28eb01f-kube-api-access-69hxz\") pod \"3aa21f38-ab8c-47b9-9ef0-f879e28eb01f\" (UID: \"3aa21f38-ab8c-47b9-9ef0-f879e28eb01f\") " Feb 16 11:29:25 crc kubenswrapper[4949]: I0216 11:29:25.749941 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/3aa21f38-ab8c-47b9-9ef0-f879e28eb01f-dispersionconf\") pod \"3aa21f38-ab8c-47b9-9ef0-f879e28eb01f\" (UID: \"3aa21f38-ab8c-47b9-9ef0-f879e28eb01f\") " Feb 16 11:29:25 crc kubenswrapper[4949]: W0216 11:29:25.750325 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb8fafaac_cbaa_4726_91b8_b0739034455f.slice/crio-b922bd847acd0d5dfb0b071015099ba39fada2809f7508bb4d3eb744d2038b00 WatchSource:0}: Error finding container b922bd847acd0d5dfb0b071015099ba39fada2809f7508bb4d3eb744d2038b00: Status 404 returned error can't find the container with id b922bd847acd0d5dfb0b071015099ba39fada2809f7508bb4d3eb744d2038b00 Feb 16 11:29:25 crc kubenswrapper[4949]: I0216 11:29:25.750841 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3aa21f38-ab8c-47b9-9ef0-f879e28eb01f-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "3aa21f38-ab8c-47b9-9ef0-f879e28eb01f" (UID: 
"3aa21f38-ab8c-47b9-9ef0-f879e28eb01f"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:29:25 crc kubenswrapper[4949]: I0216 11:29:25.753793 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3aa21f38-ab8c-47b9-9ef0-f879e28eb01f-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "3aa21f38-ab8c-47b9-9ef0-f879e28eb01f" (UID: "3aa21f38-ab8c-47b9-9ef0-f879e28eb01f"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:29:25 crc kubenswrapper[4949]: I0216 11:29:25.759054 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3aa21f38-ab8c-47b9-9ef0-f879e28eb01f-kube-api-access-69hxz" (OuterVolumeSpecName: "kube-api-access-69hxz") pod "3aa21f38-ab8c-47b9-9ef0-f879e28eb01f" (UID: "3aa21f38-ab8c-47b9-9ef0-f879e28eb01f"). InnerVolumeSpecName "kube-api-access-69hxz". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:29:25 crc kubenswrapper[4949]: I0216 11:29:25.765467 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3aa21f38-ab8c-47b9-9ef0-f879e28eb01f-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "3aa21f38-ab8c-47b9-9ef0-f879e28eb01f" (UID: "3aa21f38-ab8c-47b9-9ef0-f879e28eb01f"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:29:25 crc kubenswrapper[4949]: I0216 11:29:25.792403 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3aa21f38-ab8c-47b9-9ef0-f879e28eb01f-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "3aa21f38-ab8c-47b9-9ef0-f879e28eb01f" (UID: "3aa21f38-ab8c-47b9-9ef0-f879e28eb01f"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:29:25 crc kubenswrapper[4949]: I0216 11:29:25.797099 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3aa21f38-ab8c-47b9-9ef0-f879e28eb01f-scripts" (OuterVolumeSpecName: "scripts") pod "3aa21f38-ab8c-47b9-9ef0-f879e28eb01f" (UID: "3aa21f38-ab8c-47b9-9ef0-f879e28eb01f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:29:25 crc kubenswrapper[4949]: I0216 11:29:25.801422 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3aa21f38-ab8c-47b9-9ef0-f879e28eb01f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3aa21f38-ab8c-47b9-9ef0-f879e28eb01f" (UID: "3aa21f38-ab8c-47b9-9ef0-f879e28eb01f"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:29:25 crc kubenswrapper[4949]: I0216 11:29:25.853355 4949 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/3aa21f38-ab8c-47b9-9ef0-f879e28eb01f-ring-data-devices\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:25 crc kubenswrapper[4949]: I0216 11:29:25.853387 4949 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3aa21f38-ab8c-47b9-9ef0-f879e28eb01f-scripts\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:25 crc kubenswrapper[4949]: I0216 11:29:25.853397 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-69hxz\" (UniqueName: \"kubernetes.io/projected/3aa21f38-ab8c-47b9-9ef0-f879e28eb01f-kube-api-access-69hxz\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:25 crc kubenswrapper[4949]: I0216 11:29:25.853409 4949 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/3aa21f38-ab8c-47b9-9ef0-f879e28eb01f-dispersionconf\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:25 crc kubenswrapper[4949]: I0216 11:29:25.853420 4949 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/3aa21f38-ab8c-47b9-9ef0-f879e28eb01f-swiftconf\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:25 crc kubenswrapper[4949]: I0216 11:29:25.853428 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3aa21f38-ab8c-47b9-9ef0-f879e28eb01f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:25 crc kubenswrapper[4949]: I0216 11:29:25.853436 4949 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/3aa21f38-ab8c-47b9-9ef0-f879e28eb01f-etc-swift\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:25 crc kubenswrapper[4949]: I0216 11:29:25.974716 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"9dd62db2-4af9-482c-b9ad-34021e59dae8","Type":"ContainerStarted","Data":"c9a4b5d8aa15e598d9c2f85ea3b85e9f0970f9c71a53e01262982fa1a7449218"} Feb 16 11:29:25 crc kubenswrapper[4949]: I0216 11:29:25.976861 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b8fafaac-cbaa-4726-91b8-b0739034455f","Type":"ContainerStarted","Data":"b922bd847acd0d5dfb0b071015099ba39fada2809f7508bb4d3eb744d2038b00"} Feb 16 11:29:25 crc kubenswrapper[4949]: I0216 11:29:25.978991 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-nvcsr" event={"ID":"3aa21f38-ab8c-47b9-9ef0-f879e28eb01f","Type":"ContainerDied","Data":"605bc19337b4b1148af57750e8b5a2734734e38e9fcee4b136af32fa40ce3cc7"} Feb 16 11:29:25 crc kubenswrapper[4949]: I0216 11:29:25.979045 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="605bc19337b4b1148af57750e8b5a2734734e38e9fcee4b136af32fa40ce3cc7" Feb 16 11:29:25 crc kubenswrapper[4949]: I0216 11:29:25.979128 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-nvcsr" Feb 16 11:29:26 crc kubenswrapper[4949]: I0216 11:29:26.072245 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-clw7g-config-qk469"] Feb 16 11:29:26 crc kubenswrapper[4949]: I0216 11:29:26.144406 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-lwh9x"] Feb 16 11:29:26 crc kubenswrapper[4949]: W0216 11:29:26.147074 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podffc1202a_a99b_4190_984b_511f9d345832.slice/crio-80a9324ef8dc16abcdef2282ed1a48ac429a18fe89b5698ae179a236b5a8eb2e WatchSource:0}: Error finding container 80a9324ef8dc16abcdef2282ed1a48ac429a18fe89b5698ae179a236b5a8eb2e: Status 404 returned error can't find the container with id 80a9324ef8dc16abcdef2282ed1a48ac429a18fe89b5698ae179a236b5a8eb2e Feb 16 11:29:26 crc kubenswrapper[4949]: I0216 11:29:26.643467 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-mx7hg"] Feb 16 11:29:26 crc kubenswrapper[4949]: E0216 11:29:26.644626 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3aa21f38-ab8c-47b9-9ef0-f879e28eb01f" containerName="swift-ring-rebalance" Feb 16 11:29:26 crc kubenswrapper[4949]: I0216 11:29:26.644644 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="3aa21f38-ab8c-47b9-9ef0-f879e28eb01f" containerName="swift-ring-rebalance" Feb 16 11:29:26 crc kubenswrapper[4949]: I0216 11:29:26.648447 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="3aa21f38-ab8c-47b9-9ef0-f879e28eb01f" containerName="swift-ring-rebalance" Feb 16 11:29:26 crc kubenswrapper[4949]: I0216 11:29:26.650941 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-mx7hg" Feb 16 11:29:26 crc kubenswrapper[4949]: I0216 11:29:26.660331 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-mx7hg"] Feb 16 11:29:26 crc kubenswrapper[4949]: I0216 11:29:26.778227 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lmkld\" (UniqueName: \"kubernetes.io/projected/553663f2-42ac-4491-affa-8a15ca2e093b-kube-api-access-lmkld\") pod \"mysqld-exporter-openstack-cell1-db-create-mx7hg\" (UID: \"553663f2-42ac-4491-affa-8a15ca2e093b\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-mx7hg" Feb 16 11:29:26 crc kubenswrapper[4949]: I0216 11:29:26.778586 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/553663f2-42ac-4491-affa-8a15ca2e093b-operator-scripts\") pod \"mysqld-exporter-openstack-cell1-db-create-mx7hg\" (UID: \"553663f2-42ac-4491-affa-8a15ca2e093b\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-mx7hg" Feb 16 11:29:26 crc kubenswrapper[4949]: I0216 11:29:26.854290 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-08b3-account-create-update-69t4v"] Feb 16 11:29:26 crc kubenswrapper[4949]: I0216 11:29:26.855957 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-08b3-account-create-update-69t4v" Feb 16 11:29:26 crc kubenswrapper[4949]: I0216 11:29:26.858465 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-openstack-cell1-db-secret" Feb 16 11:29:26 crc kubenswrapper[4949]: I0216 11:29:26.875565 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-08b3-account-create-update-69t4v"] Feb 16 11:29:26 crc kubenswrapper[4949]: I0216 11:29:26.880417 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/553663f2-42ac-4491-affa-8a15ca2e093b-operator-scripts\") pod \"mysqld-exporter-openstack-cell1-db-create-mx7hg\" (UID: \"553663f2-42ac-4491-affa-8a15ca2e093b\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-mx7hg" Feb 16 11:29:26 crc kubenswrapper[4949]: I0216 11:29:26.880594 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lmkld\" (UniqueName: \"kubernetes.io/projected/553663f2-42ac-4491-affa-8a15ca2e093b-kube-api-access-lmkld\") pod \"mysqld-exporter-openstack-cell1-db-create-mx7hg\" (UID: \"553663f2-42ac-4491-affa-8a15ca2e093b\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-mx7hg" Feb 16 11:29:26 crc kubenswrapper[4949]: I0216 11:29:26.881579 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/553663f2-42ac-4491-affa-8a15ca2e093b-operator-scripts\") pod \"mysqld-exporter-openstack-cell1-db-create-mx7hg\" (UID: \"553663f2-42ac-4491-affa-8a15ca2e093b\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-mx7hg" Feb 16 11:29:26 crc kubenswrapper[4949]: I0216 11:29:26.903216 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lmkld\" (UniqueName: \"kubernetes.io/projected/553663f2-42ac-4491-affa-8a15ca2e093b-kube-api-access-lmkld\") pod \"mysqld-exporter-openstack-cell1-db-create-mx7hg\" (UID: \"553663f2-42ac-4491-affa-8a15ca2e093b\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-mx7hg" Feb 16 11:29:26 crc kubenswrapper[4949]: I0216 11:29:26.983042 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sc9j6\" (UniqueName: \"kubernetes.io/projected/ee2937a9-54c6-43b0-8fbd-5a83e3fd8da2-kube-api-access-sc9j6\") pod \"mysqld-exporter-08b3-account-create-update-69t4v\" (UID: \"ee2937a9-54c6-43b0-8fbd-5a83e3fd8da2\") " pod="openstack/mysqld-exporter-08b3-account-create-update-69t4v" Feb 16 11:29:26 crc kubenswrapper[4949]: I0216 11:29:26.983199 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ee2937a9-54c6-43b0-8fbd-5a83e3fd8da2-operator-scripts\") pod \"mysqld-exporter-08b3-account-create-update-69t4v\" (UID: \"ee2937a9-54c6-43b0-8fbd-5a83e3fd8da2\") " pod="openstack/mysqld-exporter-08b3-account-create-update-69t4v" Feb 16 11:29:26 crc kubenswrapper[4949]: I0216 11:29:26.990775 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-mx7hg" Feb 16 11:29:26 crc kubenswrapper[4949]: I0216 11:29:26.992710 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-lwh9x" event={"ID":"ffc1202a-a99b-4190-984b-511f9d345832","Type":"ContainerStarted","Data":"80a9324ef8dc16abcdef2282ed1a48ac429a18fe89b5698ae179a236b5a8eb2e"} Feb 16 11:29:26 crc kubenswrapper[4949]: I0216 11:29:26.996246 4949 generic.go:334] "Generic (PLEG): container finished" podID="2a271149-395e-4b79-8480-5c146509a20f" containerID="9e8b3602a501feea39bba77d0760d9ccd52744e0b128acc546f39714fdffa8eb" exitCode=0 Feb 16 11:29:26 crc kubenswrapper[4949]: I0216 11:29:26.996326 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-clw7g-config-qk469" event={"ID":"2a271149-395e-4b79-8480-5c146509a20f","Type":"ContainerDied","Data":"9e8b3602a501feea39bba77d0760d9ccd52744e0b128acc546f39714fdffa8eb"} Feb 16 11:29:26 crc kubenswrapper[4949]: I0216 11:29:26.996431 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-clw7g-config-qk469" event={"ID":"2a271149-395e-4b79-8480-5c146509a20f","Type":"ContainerStarted","Data":"c792358000042bfb4e2e7f3ed0439235262c3fd17c5f95ffdccc434b0a665d76"} Feb 16 11:29:26 crc kubenswrapper[4949]: I0216 11:29:26.999762 4949 generic.go:334] "Generic (PLEG): container finished" podID="2b4e8478-eec0-499f-a824-b0f07355e4f6" containerID="184ebec4345da93f1c2a65db2c7a0090e4039d1d6b0ebf753debbf33a95841da" exitCode=0 Feb 16 11:29:26 crc kubenswrapper[4949]: I0216 11:29:26.999840 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"2b4e8478-eec0-499f-a824-b0f07355e4f6","Type":"ContainerDied","Data":"184ebec4345da93f1c2a65db2c7a0090e4039d1d6b0ebf753debbf33a95841da"} Feb 16 11:29:27 crc kubenswrapper[4949]: I0216 11:29:27.085654 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sc9j6\" (UniqueName: \"kubernetes.io/projected/ee2937a9-54c6-43b0-8fbd-5a83e3fd8da2-kube-api-access-sc9j6\") pod \"mysqld-exporter-08b3-account-create-update-69t4v\" (UID: \"ee2937a9-54c6-43b0-8fbd-5a83e3fd8da2\") " pod="openstack/mysqld-exporter-08b3-account-create-update-69t4v" Feb 16 11:29:27 crc kubenswrapper[4949]: I0216 11:29:27.085742 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ee2937a9-54c6-43b0-8fbd-5a83e3fd8da2-operator-scripts\") pod \"mysqld-exporter-08b3-account-create-update-69t4v\" (UID: \"ee2937a9-54c6-43b0-8fbd-5a83e3fd8da2\") " pod="openstack/mysqld-exporter-08b3-account-create-update-69t4v" Feb 16 11:29:27 crc kubenswrapper[4949]: I0216 11:29:27.086742 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ee2937a9-54c6-43b0-8fbd-5a83e3fd8da2-operator-scripts\") pod \"mysqld-exporter-08b3-account-create-update-69t4v\" (UID: \"ee2937a9-54c6-43b0-8fbd-5a83e3fd8da2\") " pod="openstack/mysqld-exporter-08b3-account-create-update-69t4v" Feb 16 11:29:27 crc kubenswrapper[4949]: I0216 11:29:27.107621 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sc9j6\" (UniqueName: \"kubernetes.io/projected/ee2937a9-54c6-43b0-8fbd-5a83e3fd8da2-kube-api-access-sc9j6\") pod \"mysqld-exporter-08b3-account-create-update-69t4v\" (UID: \"ee2937a9-54c6-43b0-8fbd-5a83e3fd8da2\") " 
pod="openstack/mysqld-exporter-08b3-account-create-update-69t4v" Feb 16 11:29:27 crc kubenswrapper[4949]: I0216 11:29:27.159811 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-4q8jw"] Feb 16 11:29:27 crc kubenswrapper[4949]: I0216 11:29:27.163243 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-4q8jw" Feb 16 11:29:27 crc kubenswrapper[4949]: I0216 11:29:27.170993 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-mariadb-root-db-secret" Feb 16 11:29:27 crc kubenswrapper[4949]: I0216 11:29:27.198522 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-4q8jw"] Feb 16 11:29:27 crc kubenswrapper[4949]: I0216 11:29:27.290450 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lz2dn\" (UniqueName: \"kubernetes.io/projected/8715bb56-ef48-4c95-8d9d-85bdefe22a3e-kube-api-access-lz2dn\") pod \"root-account-create-update-4q8jw\" (UID: \"8715bb56-ef48-4c95-8d9d-85bdefe22a3e\") " pod="openstack/root-account-create-update-4q8jw" Feb 16 11:29:27 crc kubenswrapper[4949]: I0216 11:29:27.290651 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8715bb56-ef48-4c95-8d9d-85bdefe22a3e-operator-scripts\") pod \"root-account-create-update-4q8jw\" (UID: \"8715bb56-ef48-4c95-8d9d-85bdefe22a3e\") " pod="openstack/root-account-create-update-4q8jw" Feb 16 11:29:27 crc kubenswrapper[4949]: I0216 11:29:27.335853 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-08b3-account-create-update-69t4v" Feb 16 11:29:27 crc kubenswrapper[4949]: I0216 11:29:27.397668 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lz2dn\" (UniqueName: \"kubernetes.io/projected/8715bb56-ef48-4c95-8d9d-85bdefe22a3e-kube-api-access-lz2dn\") pod \"root-account-create-update-4q8jw\" (UID: \"8715bb56-ef48-4c95-8d9d-85bdefe22a3e\") " pod="openstack/root-account-create-update-4q8jw" Feb 16 11:29:27 crc kubenswrapper[4949]: I0216 11:29:27.398184 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8715bb56-ef48-4c95-8d9d-85bdefe22a3e-operator-scripts\") pod \"root-account-create-update-4q8jw\" (UID: \"8715bb56-ef48-4c95-8d9d-85bdefe22a3e\") " pod="openstack/root-account-create-update-4q8jw" Feb 16 11:29:27 crc kubenswrapper[4949]: I0216 11:29:27.399164 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8715bb56-ef48-4c95-8d9d-85bdefe22a3e-operator-scripts\") pod \"root-account-create-update-4q8jw\" (UID: \"8715bb56-ef48-4c95-8d9d-85bdefe22a3e\") " pod="openstack/root-account-create-update-4q8jw" Feb 16 11:29:27 crc kubenswrapper[4949]: I0216 11:29:27.422558 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lz2dn\" (UniqueName: \"kubernetes.io/projected/8715bb56-ef48-4c95-8d9d-85bdefe22a3e-kube-api-access-lz2dn\") pod \"root-account-create-update-4q8jw\" (UID: \"8715bb56-ef48-4c95-8d9d-85bdefe22a3e\") " pod="openstack/root-account-create-update-4q8jw" Feb 16 11:29:27 crc kubenswrapper[4949]: I0216 11:29:27.613562 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-4q8jw" Feb 16 11:29:27 crc kubenswrapper[4949]: I0216 11:29:27.655456 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-mx7hg"] Feb 16 11:29:27 crc kubenswrapper[4949]: W0216 11:29:27.686932 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod553663f2_42ac_4491_affa_8a15ca2e093b.slice/crio-a63ff559d93eba37b23c82892258ab162200812b691ca42d69f77ee340235ff0 WatchSource:0}: Error finding container a63ff559d93eba37b23c82892258ab162200812b691ca42d69f77ee340235ff0: Status 404 returned error can't find the container with id a63ff559d93eba37b23c82892258ab162200812b691ca42d69f77ee340235ff0 Feb 16 11:29:28 crc kubenswrapper[4949]: I0216 11:29:28.067144 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"2b4e8478-eec0-499f-a824-b0f07355e4f6","Type":"ContainerStarted","Data":"0aa10bfe0b645929c6b02ae797deef640d296001d6e7f8b326d64d53facdf814"} Feb 16 11:29:28 crc kubenswrapper[4949]: I0216 11:29:28.074910 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Feb 16 11:29:28 crc kubenswrapper[4949]: I0216 11:29:28.079322 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-cell1-db-create-mx7hg" event={"ID":"553663f2-42ac-4491-affa-8a15ca2e093b","Type":"ContainerStarted","Data":"a63ff559d93eba37b23c82892258ab162200812b691ca42d69f77ee340235ff0"} Feb 16 11:29:28 crc kubenswrapper[4949]: I0216 11:29:28.085926 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b8fafaac-cbaa-4726-91b8-b0739034455f","Type":"ContainerStarted","Data":"6ded6804bb1b831110daee1a6d3e737c85aac61ab2dd0dfef8af534545504adc"} Feb 16 11:29:28 crc kubenswrapper[4949]: I0216 11:29:28.087959 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-08b3-account-create-update-69t4v"] Feb 16 11:29:28 crc kubenswrapper[4949]: I0216 11:29:28.138732 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=37.743866411 podStartE2EDuration="1m19.138710699s" podCreationTimestamp="2026-02-16 11:28:09 +0000 UTC" firstStartedPulling="2026-02-16 11:28:11.539987121 +0000 UTC m=+1281.169321286" lastFinishedPulling="2026-02-16 11:28:52.934831399 +0000 UTC m=+1322.564165574" observedRunningTime="2026-02-16 11:29:28.121085376 +0000 UTC m=+1357.750419561" watchObservedRunningTime="2026-02-16 11:29:28.138710699 +0000 UTC m=+1357.768044864" Feb 16 11:29:28 crc kubenswrapper[4949]: I0216 11:29:28.208803 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-4q8jw"] Feb 16 11:29:28 crc kubenswrapper[4949]: W0216 11:29:28.354550 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8715bb56_ef48_4c95_8d9d_85bdefe22a3e.slice/crio-6777c22f50f9948a2522cb8e61905ca1138f79cf60bf550152a267fc07251426 WatchSource:0}: Error finding container 6777c22f50f9948a2522cb8e61905ca1138f79cf60bf550152a267fc07251426: Status 404 returned error can't find the container with id 6777c22f50f9948a2522cb8e61905ca1138f79cf60bf550152a267fc07251426 Feb 16 11:29:29 crc kubenswrapper[4949]: I0216 11:29:29.110942 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ovn-controller-clw7g-config-qk469" event={"ID":"2a271149-395e-4b79-8480-5c146509a20f","Type":"ContainerDied","Data":"c792358000042bfb4e2e7f3ed0439235262c3fd17c5f95ffdccc434b0a665d76"} Feb 16 11:29:29 crc kubenswrapper[4949]: I0216 11:29:29.111331 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c792358000042bfb4e2e7f3ed0439235262c3fd17c5f95ffdccc434b0a665d76" Feb 16 11:29:29 crc kubenswrapper[4949]: I0216 11:29:29.115597 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-4q8jw" event={"ID":"8715bb56-ef48-4c95-8d9d-85bdefe22a3e","Type":"ContainerStarted","Data":"6777c22f50f9948a2522cb8e61905ca1138f79cf60bf550152a267fc07251426"} Feb 16 11:29:29 crc kubenswrapper[4949]: I0216 11:29:29.118157 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-cell1-db-create-mx7hg" event={"ID":"553663f2-42ac-4491-affa-8a15ca2e093b","Type":"ContainerStarted","Data":"97cc4c6dd1924f33b67aad9a58754432b2191458b979b128389e8f15c7ab0572"} Feb 16 11:29:29 crc kubenswrapper[4949]: I0216 11:29:29.120517 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-08b3-account-create-update-69t4v" event={"ID":"ee2937a9-54c6-43b0-8fbd-5a83e3fd8da2","Type":"ContainerStarted","Data":"e5f6be07b24f84cb37992721f4782c524df8c42a3c0a7ab94067da45dc228786"} Feb 16 11:29:29 crc kubenswrapper[4949]: I0216 11:29:29.127631 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b8fafaac-cbaa-4726-91b8-b0739034455f","Type":"ContainerStarted","Data":"3709fcdcd5ce5535793c637d26bf12c93efce4fda29b3dae5c2c5e3ae4d33cae"} Feb 16 11:29:29 crc kubenswrapper[4949]: I0216 11:29:29.143857 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-clw7g-config-qk469" Feb 16 11:29:29 crc kubenswrapper[4949]: I0216 11:29:29.148673 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mysqld-exporter-openstack-cell1-db-create-mx7hg" podStartSLOduration=3.148651264 podStartE2EDuration="3.148651264s" podCreationTimestamp="2026-02-16 11:29:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:29:29.136851447 +0000 UTC m=+1358.766185612" watchObservedRunningTime="2026-02-16 11:29:29.148651264 +0000 UTC m=+1358.777985429" Feb 16 11:29:29 crc kubenswrapper[4949]: I0216 11:29:29.257387 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/2a271149-395e-4b79-8480-5c146509a20f-var-run\") pod \"2a271149-395e-4b79-8480-5c146509a20f\" (UID: \"2a271149-395e-4b79-8480-5c146509a20f\") " Feb 16 11:29:29 crc kubenswrapper[4949]: I0216 11:29:29.257612 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tpq4v\" (UniqueName: \"kubernetes.io/projected/2a271149-395e-4b79-8480-5c146509a20f-kube-api-access-tpq4v\") pod \"2a271149-395e-4b79-8480-5c146509a20f\" (UID: \"2a271149-395e-4b79-8480-5c146509a20f\") " Feb 16 11:29:29 crc kubenswrapper[4949]: I0216 11:29:29.257643 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2a271149-395e-4b79-8480-5c146509a20f-scripts\") pod \"2a271149-395e-4b79-8480-5c146509a20f\" (UID: \"2a271149-395e-4b79-8480-5c146509a20f\") " Feb 16 11:29:29 crc kubenswrapper[4949]: I0216 11:29:29.257734 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/2a271149-395e-4b79-8480-5c146509a20f-additional-scripts\") pod \"2a271149-395e-4b79-8480-5c146509a20f\" (UID: \"2a271149-395e-4b79-8480-5c146509a20f\") " Feb 16 11:29:29 crc kubenswrapper[4949]: I0216 11:29:29.257789 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/2a271149-395e-4b79-8480-5c146509a20f-var-log-ovn\") pod \"2a271149-395e-4b79-8480-5c146509a20f\" (UID: \"2a271149-395e-4b79-8480-5c146509a20f\") " Feb 16 11:29:29 crc kubenswrapper[4949]: I0216 11:29:29.257903 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/2a271149-395e-4b79-8480-5c146509a20f-var-run-ovn\") pod \"2a271149-395e-4b79-8480-5c146509a20f\" (UID: \"2a271149-395e-4b79-8480-5c146509a20f\") " Feb 16 11:29:29 crc kubenswrapper[4949]: I0216 11:29:29.257939 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2a271149-395e-4b79-8480-5c146509a20f-var-run" (OuterVolumeSpecName: "var-run") pod "2a271149-395e-4b79-8480-5c146509a20f" (UID: "2a271149-395e-4b79-8480-5c146509a20f"). InnerVolumeSpecName "var-run". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 16 11:29:29 crc kubenswrapper[4949]: I0216 11:29:29.258690 4949 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/2a271149-395e-4b79-8480-5c146509a20f-var-run\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:29 crc kubenswrapper[4949]: I0216 11:29:29.259072 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2a271149-395e-4b79-8480-5c146509a20f-scripts" (OuterVolumeSpecName: "scripts") pod "2a271149-395e-4b79-8480-5c146509a20f" (UID: "2a271149-395e-4b79-8480-5c146509a20f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:29:29 crc kubenswrapper[4949]: I0216 11:29:29.260090 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2a271149-395e-4b79-8480-5c146509a20f-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "2a271149-395e-4b79-8480-5c146509a20f" (UID: "2a271149-395e-4b79-8480-5c146509a20f"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 16 11:29:29 crc kubenswrapper[4949]: I0216 11:29:29.260140 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2a271149-395e-4b79-8480-5c146509a20f-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "2a271149-395e-4b79-8480-5c146509a20f" (UID: "2a271149-395e-4b79-8480-5c146509a20f"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 16 11:29:29 crc kubenswrapper[4949]: I0216 11:29:29.266739 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2a271149-395e-4b79-8480-5c146509a20f-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "2a271149-395e-4b79-8480-5c146509a20f" (UID: "2a271149-395e-4b79-8480-5c146509a20f"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:29:29 crc kubenswrapper[4949]: I0216 11:29:29.278995 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2a271149-395e-4b79-8480-5c146509a20f-kube-api-access-tpq4v" (OuterVolumeSpecName: "kube-api-access-tpq4v") pod "2a271149-395e-4b79-8480-5c146509a20f" (UID: "2a271149-395e-4b79-8480-5c146509a20f"). InnerVolumeSpecName "kube-api-access-tpq4v". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:29:29 crc kubenswrapper[4949]: I0216 11:29:29.368825 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tpq4v\" (UniqueName: \"kubernetes.io/projected/2a271149-395e-4b79-8480-5c146509a20f-kube-api-access-tpq4v\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:29 crc kubenswrapper[4949]: I0216 11:29:29.368864 4949 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2a271149-395e-4b79-8480-5c146509a20f-scripts\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:29 crc kubenswrapper[4949]: I0216 11:29:29.368876 4949 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/2a271149-395e-4b79-8480-5c146509a20f-additional-scripts\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:29 crc kubenswrapper[4949]: I0216 11:29:29.368888 4949 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/2a271149-395e-4b79-8480-5c146509a20f-var-log-ovn\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:29 crc kubenswrapper[4949]: I0216 11:29:29.368899 4949 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/2a271149-395e-4b79-8480-5c146509a20f-var-run-ovn\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:29 crc kubenswrapper[4949]: I0216 11:29:29.475973 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-clw7g" Feb 16 11:29:30 crc kubenswrapper[4949]: I0216 11:29:30.139705 4949 generic.go:334] "Generic (PLEG): container finished" podID="0f715146-edc4-4f1f-80e3-f134d9833f47" containerID="f75d823174942d793b39e3e51f5a70d2c313e2e41e562884d6fd8299f71d272c" exitCode=0 Feb 16 11:29:30 crc kubenswrapper[4949]: I0216 11:29:30.139789 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"0f715146-edc4-4f1f-80e3-f134d9833f47","Type":"ContainerDied","Data":"f75d823174942d793b39e3e51f5a70d2c313e2e41e562884d6fd8299f71d272c"} Feb 16 11:29:30 crc kubenswrapper[4949]: I0216 11:29:30.148835 4949 generic.go:334] "Generic (PLEG): container finished" podID="8715bb56-ef48-4c95-8d9d-85bdefe22a3e" containerID="20b29a2ba2f47c347276d6283714b343e860327877ad66350877cf5e1d0d668f" exitCode=0 Feb 16 11:29:30 crc kubenswrapper[4949]: I0216 11:29:30.148934 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-4q8jw" event={"ID":"8715bb56-ef48-4c95-8d9d-85bdefe22a3e","Type":"ContainerDied","Data":"20b29a2ba2f47c347276d6283714b343e860327877ad66350877cf5e1d0d668f"} Feb 16 11:29:30 crc kubenswrapper[4949]: I0216 11:29:30.151805 4949 generic.go:334] "Generic (PLEG): container finished" podID="2fd90353-44d0-4269-84cc-f90c10eb6da4" containerID="73fe0cd43b3472a86306abd4f0a156cdbb5d3c1027b363760eaa3d0147f983b9" exitCode=0 Feb 16 11:29:30 crc kubenswrapper[4949]: I0216 11:29:30.151875 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"2fd90353-44d0-4269-84cc-f90c10eb6da4","Type":"ContainerDied","Data":"73fe0cd43b3472a86306abd4f0a156cdbb5d3c1027b363760eaa3d0147f983b9"} Feb 16 11:29:30 crc kubenswrapper[4949]: I0216 11:29:30.163328 4949 generic.go:334] "Generic (PLEG): container finished" podID="553663f2-42ac-4491-affa-8a15ca2e093b" containerID="97cc4c6dd1924f33b67aad9a58754432b2191458b979b128389e8f15c7ab0572" exitCode=0 Feb 16 11:29:30 crc kubenswrapper[4949]: 
Feb 16 11:29:30 crc kubenswrapper[4949]: I0216 11:29:30.163420 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-cell1-db-create-mx7hg" event={"ID":"553663f2-42ac-4491-affa-8a15ca2e093b","Type":"ContainerDied","Data":"97cc4c6dd1924f33b67aad9a58754432b2191458b979b128389e8f15c7ab0572"}
Feb 16 11:29:30 crc kubenswrapper[4949]: I0216 11:29:30.174439 4949 generic.go:334] "Generic (PLEG): container finished" podID="ee2937a9-54c6-43b0-8fbd-5a83e3fd8da2" containerID="5114ef3c7b87627e086929c0b1327247fed05cb3e29cca01a258ba0da9cdabc9" exitCode=0
Feb 16 11:29:30 crc kubenswrapper[4949]: I0216 11:29:30.174566 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-08b3-account-create-update-69t4v" event={"ID":"ee2937a9-54c6-43b0-8fbd-5a83e3fd8da2","Type":"ContainerDied","Data":"5114ef3c7b87627e086929c0b1327247fed05cb3e29cca01a258ba0da9cdabc9"}
Feb 16 11:29:30 crc kubenswrapper[4949]: I0216 11:29:30.183052 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"9dd62db2-4af9-482c-b9ad-34021e59dae8","Type":"ContainerStarted","Data":"b1586a2ebe8bc4c77bd82b8125c5374baef9f2d70420c2ecdee21fb3eb775a3b"}
Feb 16 11:29:30 crc kubenswrapper[4949]: I0216 11:29:30.186347 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-clw7g-config-qk469"
Feb 16 11:29:30 crc kubenswrapper[4949]: I0216 11:29:30.191917 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b8fafaac-cbaa-4726-91b8-b0739034455f","Type":"ContainerStarted","Data":"edaf49b9f18dbe08d51959998a0036561aa8200c9d458fa0f65af589aaa24e65"}
Feb 16 11:29:30 crc kubenswrapper[4949]: I0216 11:29:30.191972 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b8fafaac-cbaa-4726-91b8-b0739034455f","Type":"ContainerStarted","Data":"3afecca10ddcb0929c8b66036eb44ebff201fff840ab2f351d3f9f06b995538b"}
Feb 16 11:29:30 crc kubenswrapper[4949]: I0216 11:29:30.413904 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-clw7g-config-qk469"]
Feb 16 11:29:30 crc kubenswrapper[4949]: I0216 11:29:30.438935 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-clw7g-config-qk469"]
Feb 16 11:29:30 crc kubenswrapper[4949]: E0216 11:29:30.646646 4949 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2a271149_395e_4b79_8480_5c146509a20f.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2a271149_395e_4b79_8480_5c146509a20f.slice/crio-c792358000042bfb4e2e7f3ed0439235262c3fd17c5f95ffdccc434b0a665d76\": RecentStats: unable to find data in memory cache]"
Feb 16 11:29:31 crc kubenswrapper[4949]: I0216 11:29:31.199452 4949 generic.go:334] "Generic (PLEG): container finished" podID="4579a2eb-f9a3-4d32-b67a-d76de7f6a97a" containerID="28dbd9a3450bc4c9940c72d847e3a36ef6fd5140215944f266e71e0a01581677" exitCode=0
Feb 16 11:29:31 crc kubenswrapper[4949]: I0216 11:29:31.199544 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a","Type":"ContainerDied","Data":"28dbd9a3450bc4c9940c72d847e3a36ef6fd5140215944f266e71e0a01581677"}
Feb 16 11:29:31 crc kubenswrapper[4949]: I0216 11:29:31.204093 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"2fd90353-44d0-4269-84cc-f90c10eb6da4","Type":"ContainerStarted","Data":"7474434149d49ef0e5a3211b6680db0224f03af056ef4ae8e7648282e53a4e3f"}
Feb 16 11:29:31 crc kubenswrapper[4949]: I0216 11:29:31.204403 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-1"
Feb 16 11:29:31 crc kubenswrapper[4949]: I0216 11:29:31.210692 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"0f715146-edc4-4f1f-80e3-f134d9833f47","Type":"ContainerStarted","Data":"2bd451c81f0ec8f9fb47a11286a0d4725f67f1e8cea74f5539242bca2957d56c"}
Feb 16 11:29:31 crc kubenswrapper[4949]: I0216 11:29:31.275789 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2a271149-395e-4b79-8480-5c146509a20f" path="/var/lib/kubelet/pods/2a271149-395e-4b79-8480-5c146509a20f/volumes"
Feb 16 11:29:31 crc kubenswrapper[4949]: I0216 11:29:31.383907 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-1" podStartSLOduration=-9223371954.470898 podStartE2EDuration="1m22.383878147s" podCreationTimestamp="2026-02-16 11:28:09 +0000 UTC" firstStartedPulling="2026-02-16 11:28:11.871223778 +0000 UTC m=+1281.500557943" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:29:31.369030963 +0000 UTC m=+1360.998365128" watchObservedRunningTime="2026-02-16 11:29:31.383878147 +0000 UTC m=+1361.013212302"
Feb 16 11:29:31 crc kubenswrapper[4949]: I0216 11:29:31.469066 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=-9223371954.385744 podStartE2EDuration="1m22.469032178s" podCreationTimestamp="2026-02-16 11:28:09 +0000 UTC" firstStartedPulling="2026-02-16 11:28:12.104329259 +0000 UTC m=+1281.733663424" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:29:31.422127459 +0000 UTC m=+1361.051461624" watchObservedRunningTime="2026-02-16 11:29:31.469032178 +0000 UTC m=+1361.098366343"
Need to start a new one" pod="openstack/root-account-create-update-4q8jw" Feb 16 11:29:32 crc kubenswrapper[4949]: I0216 11:29:32.544650 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ee2937a9-54c6-43b0-8fbd-5a83e3fd8da2-operator-scripts\") pod \"ee2937a9-54c6-43b0-8fbd-5a83e3fd8da2\" (UID: \"ee2937a9-54c6-43b0-8fbd-5a83e3fd8da2\") " Feb 16 11:29:32 crc kubenswrapper[4949]: I0216 11:29:32.544920 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sc9j6\" (UniqueName: \"kubernetes.io/projected/ee2937a9-54c6-43b0-8fbd-5a83e3fd8da2-kube-api-access-sc9j6\") pod \"ee2937a9-54c6-43b0-8fbd-5a83e3fd8da2\" (UID: \"ee2937a9-54c6-43b0-8fbd-5a83e3fd8da2\") " Feb 16 11:29:32 crc kubenswrapper[4949]: I0216 11:29:32.548035 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ee2937a9-54c6-43b0-8fbd-5a83e3fd8da2-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ee2937a9-54c6-43b0-8fbd-5a83e3fd8da2" (UID: "ee2937a9-54c6-43b0-8fbd-5a83e3fd8da2"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:29:32 crc kubenswrapper[4949]: I0216 11:29:32.549510 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee2937a9-54c6-43b0-8fbd-5a83e3fd8da2-kube-api-access-sc9j6" (OuterVolumeSpecName: "kube-api-access-sc9j6") pod "ee2937a9-54c6-43b0-8fbd-5a83e3fd8da2" (UID: "ee2937a9-54c6-43b0-8fbd-5a83e3fd8da2"). InnerVolumeSpecName "kube-api-access-sc9j6". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:29:32 crc kubenswrapper[4949]: I0216 11:29:32.645064 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-mx7hg" Feb 16 11:29:32 crc kubenswrapper[4949]: I0216 11:29:32.646339 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz2dn\" (UniqueName: \"kubernetes.io/projected/8715bb56-ef48-4c95-8d9d-85bdefe22a3e-kube-api-access-lz2dn\") pod \"8715bb56-ef48-4c95-8d9d-85bdefe22a3e\" (UID: \"8715bb56-ef48-4c95-8d9d-85bdefe22a3e\") " Feb 16 11:29:32 crc kubenswrapper[4949]: I0216 11:29:32.646558 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8715bb56-ef48-4c95-8d9d-85bdefe22a3e-operator-scripts\") pod \"8715bb56-ef48-4c95-8d9d-85bdefe22a3e\" (UID: \"8715bb56-ef48-4c95-8d9d-85bdefe22a3e\") " Feb 16 11:29:32 crc kubenswrapper[4949]: I0216 11:29:32.647138 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sc9j6\" (UniqueName: \"kubernetes.io/projected/ee2937a9-54c6-43b0-8fbd-5a83e3fd8da2-kube-api-access-sc9j6\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:32 crc kubenswrapper[4949]: I0216 11:29:32.647154 4949 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ee2937a9-54c6-43b0-8fbd-5a83e3fd8da2-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:32 crc kubenswrapper[4949]: I0216 11:29:32.647567 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8715bb56-ef48-4c95-8d9d-85bdefe22a3e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8715bb56-ef48-4c95-8d9d-85bdefe22a3e" (UID: "8715bb56-ef48-4c95-8d9d-85bdefe22a3e"). 
InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:29:32 crc kubenswrapper[4949]: I0216 11:29:32.651620 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8715bb56-ef48-4c95-8d9d-85bdefe22a3e-kube-api-access-lz2dn" (OuterVolumeSpecName: "kube-api-access-lz2dn") pod "8715bb56-ef48-4c95-8d9d-85bdefe22a3e" (UID: "8715bb56-ef48-4c95-8d9d-85bdefe22a3e"). InnerVolumeSpecName "kube-api-access-lz2dn". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:29:32 crc kubenswrapper[4949]: I0216 11:29:32.751233 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/553663f2-42ac-4491-affa-8a15ca2e093b-operator-scripts\") pod \"553663f2-42ac-4491-affa-8a15ca2e093b\" (UID: \"553663f2-42ac-4491-affa-8a15ca2e093b\") " Feb 16 11:29:32 crc kubenswrapper[4949]: I0216 11:29:32.751602 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lmkld\" (UniqueName: \"kubernetes.io/projected/553663f2-42ac-4491-affa-8a15ca2e093b-kube-api-access-lmkld\") pod \"553663f2-42ac-4491-affa-8a15ca2e093b\" (UID: \"553663f2-42ac-4491-affa-8a15ca2e093b\") " Feb 16 11:29:32 crc kubenswrapper[4949]: I0216 11:29:32.752353 4949 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8715bb56-ef48-4c95-8d9d-85bdefe22a3e-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:32 crc kubenswrapper[4949]: I0216 11:29:32.752379 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz2dn\" (UniqueName: \"kubernetes.io/projected/8715bb56-ef48-4c95-8d9d-85bdefe22a3e-kube-api-access-lz2dn\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:32 crc kubenswrapper[4949]: I0216 11:29:32.753252 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/553663f2-42ac-4491-affa-8a15ca2e093b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "553663f2-42ac-4491-affa-8a15ca2e093b" (UID: "553663f2-42ac-4491-affa-8a15ca2e093b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:29:32 crc kubenswrapper[4949]: I0216 11:29:32.756659 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/553663f2-42ac-4491-affa-8a15ca2e093b-kube-api-access-lmkld" (OuterVolumeSpecName: "kube-api-access-lmkld") pod "553663f2-42ac-4491-affa-8a15ca2e093b" (UID: "553663f2-42ac-4491-affa-8a15ca2e093b"). InnerVolumeSpecName "kube-api-access-lmkld". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:29:32 crc kubenswrapper[4949]: I0216 11:29:32.855968 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lmkld\" (UniqueName: \"kubernetes.io/projected/553663f2-42ac-4491-affa-8a15ca2e093b-kube-api-access-lmkld\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:32 crc kubenswrapper[4949]: I0216 11:29:32.856003 4949 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/553663f2-42ac-4491-affa-8a15ca2e093b-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:33 crc kubenswrapper[4949]: I0216 11:29:33.302392 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-cell1-db-create-mx7hg" event={"ID":"553663f2-42ac-4491-affa-8a15ca2e093b","Type":"ContainerDied","Data":"a63ff559d93eba37b23c82892258ab162200812b691ca42d69f77ee340235ff0"} Feb 16 11:29:33 crc kubenswrapper[4949]: I0216 11:29:33.302673 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-mx7hg" Feb 16 11:29:33 crc kubenswrapper[4949]: I0216 11:29:33.302753 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a63ff559d93eba37b23c82892258ab162200812b691ca42d69f77ee340235ff0" Feb 16 11:29:33 crc kubenswrapper[4949]: I0216 11:29:33.325413 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b8fafaac-cbaa-4726-91b8-b0739034455f","Type":"ContainerStarted","Data":"bf14d1d2e2dfdaf1d54cba52e02afa3dcd466daf6bf127b5f9580c82147ea9bb"} Feb 16 11:29:33 crc kubenswrapper[4949]: I0216 11:29:33.325470 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b8fafaac-cbaa-4726-91b8-b0739034455f","Type":"ContainerStarted","Data":"82280ac915d9b03d477e5d735b72a2bd5bf0b84f1489dbc7ba19094d582f07e0"} Feb 16 11:29:33 crc kubenswrapper[4949]: I0216 11:29:33.335779 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a","Type":"ContainerStarted","Data":"49365de1663135a492ebbe8a93c51b675de302485b8ad97f462bc2835be75d4b"} Feb 16 11:29:33 crc kubenswrapper[4949]: I0216 11:29:33.336100 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-2" Feb 16 11:29:33 crc kubenswrapper[4949]: I0216 11:29:33.347281 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-08b3-account-create-update-69t4v" Feb 16 11:29:33 crc kubenswrapper[4949]: I0216 11:29:33.347300 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-4q8jw" event={"ID":"8715bb56-ef48-4c95-8d9d-85bdefe22a3e","Type":"ContainerDied","Data":"6777c22f50f9948a2522cb8e61905ca1138f79cf60bf550152a267fc07251426"} Feb 16 11:29:33 crc kubenswrapper[4949]: I0216 11:29:33.347327 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6777c22f50f9948a2522cb8e61905ca1138f79cf60bf550152a267fc07251426" Feb 16 11:29:33 crc kubenswrapper[4949]: I0216 11:29:33.347281 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-4q8jw" Feb 16 11:29:33 crc kubenswrapper[4949]: I0216 11:29:33.502723 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-2" podStartSLOduration=-9223371952.352074 podStartE2EDuration="1m24.502701912s" podCreationTimestamp="2026-02-16 11:28:09 +0000 UTC" firstStartedPulling="2026-02-16 11:28:11.870913929 +0000 UTC m=+1281.500248094" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:29:33.379551488 +0000 UTC m=+1363.008885683" watchObservedRunningTime="2026-02-16 11:29:33.502701912 +0000 UTC m=+1363.132036077" Feb 16 11:29:34 crc kubenswrapper[4949]: I0216 11:29:34.371311 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b8fafaac-cbaa-4726-91b8-b0739034455f","Type":"ContainerStarted","Data":"354f7124af5a03abad8d9c95327a22521524c695e51e5d5d46f983ee8650a87f"} Feb 16 11:29:34 crc kubenswrapper[4949]: I0216 11:29:34.371864 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b8fafaac-cbaa-4726-91b8-b0739034455f","Type":"ContainerStarted","Data":"e8690688809095e2efd22ad845a8f67800ad6440cae5fed513b3189df8da6b4b"} Feb 16 11:29:34 crc kubenswrapper[4949]: I0216 11:29:34.550250 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 16 11:29:34 crc kubenswrapper[4949]: I0216 11:29:34.550326 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 16 11:29:36 crc kubenswrapper[4949]: I0216 11:29:36.412444 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"9dd62db2-4af9-482c-b9ad-34021e59dae8","Type":"ContainerStarted","Data":"8777d8e1b0069e26d970e9341adf3c6b26e914131602291d755d47675b7fa4ff"} Feb 16 11:29:36 crc kubenswrapper[4949]: I0216 11:29:36.442658 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b8fafaac-cbaa-4726-91b8-b0739034455f","Type":"ContainerStarted","Data":"32939f511f86ef7705d535ace053ac312afd373add70e05a0f94c8fc9bd357a6"} Feb 16 11:29:36 crc kubenswrapper[4949]: I0216 11:29:36.458918 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=4.570109868 podStartE2EDuration="1m20.458885638s" podCreationTimestamp="2026-02-16 11:28:16 +0000 UTC" firstStartedPulling="2026-02-16 11:28:19.921469584 +0000 UTC m=+1289.550803749" lastFinishedPulling="2026-02-16 11:29:35.810245354 +0000 UTC m=+1365.439579519" observedRunningTime="2026-02-16 11:29:36.45230868 +0000 UTC m=+1366.081642845" watchObservedRunningTime="2026-02-16 11:29:36.458885638 +0000 UTC m=+1366.088219803" Feb 16 11:29:37 crc kubenswrapper[4949]: I0216 11:29:37.039040 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-0"] Feb 16 11:29:37 crc kubenswrapper[4949]: E0216 11:29:37.040271 4949 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="8715bb56-ef48-4c95-8d9d-85bdefe22a3e" containerName="mariadb-account-create-update" Feb 16 11:29:37 crc kubenswrapper[4949]: I0216 11:29:37.040297 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="8715bb56-ef48-4c95-8d9d-85bdefe22a3e" containerName="mariadb-account-create-update" Feb 16 11:29:37 crc kubenswrapper[4949]: E0216 11:29:37.040311 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a271149-395e-4b79-8480-5c146509a20f" containerName="ovn-config" Feb 16 11:29:37 crc kubenswrapper[4949]: I0216 11:29:37.040319 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a271149-395e-4b79-8480-5c146509a20f" containerName="ovn-config" Feb 16 11:29:37 crc kubenswrapper[4949]: E0216 11:29:37.040344 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="553663f2-42ac-4491-affa-8a15ca2e093b" containerName="mariadb-database-create" Feb 16 11:29:37 crc kubenswrapper[4949]: I0216 11:29:37.040352 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="553663f2-42ac-4491-affa-8a15ca2e093b" containerName="mariadb-database-create" Feb 16 11:29:37 crc kubenswrapper[4949]: E0216 11:29:37.040367 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee2937a9-54c6-43b0-8fbd-5a83e3fd8da2" containerName="mariadb-account-create-update" Feb 16 11:29:37 crc kubenswrapper[4949]: I0216 11:29:37.040376 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee2937a9-54c6-43b0-8fbd-5a83e3fd8da2" containerName="mariadb-account-create-update" Feb 16 11:29:37 crc kubenswrapper[4949]: I0216 11:29:37.040676 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee2937a9-54c6-43b0-8fbd-5a83e3fd8da2" containerName="mariadb-account-create-update" Feb 16 11:29:37 crc kubenswrapper[4949]: I0216 11:29:37.040695 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="8715bb56-ef48-4c95-8d9d-85bdefe22a3e" containerName="mariadb-account-create-update" Feb 16 11:29:37 crc kubenswrapper[4949]: I0216 11:29:37.040715 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="553663f2-42ac-4491-affa-8a15ca2e093b" containerName="mariadb-database-create" Feb 16 11:29:37 crc kubenswrapper[4949]: I0216 11:29:37.040729 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a271149-395e-4b79-8480-5c146509a20f" containerName="ovn-config" Feb 16 11:29:37 crc kubenswrapper[4949]: I0216 11:29:37.041832 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-0" Feb 16 11:29:37 crc kubenswrapper[4949]: I0216 11:29:37.064257 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-config-data" Feb 16 11:29:37 crc kubenswrapper[4949]: I0216 11:29:37.069384 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"] Feb 16 11:29:37 crc kubenswrapper[4949]: I0216 11:29:37.082046 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bbc90277-5d42-4c6c-a24c-d06f066d4be5-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"bbc90277-5d42-4c6c-a24c-d06f066d4be5\") " pod="openstack/mysqld-exporter-0" Feb 16 11:29:37 crc kubenswrapper[4949]: I0216 11:29:37.082510 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gk6gn\" (UniqueName: \"kubernetes.io/projected/bbc90277-5d42-4c6c-a24c-d06f066d4be5-kube-api-access-gk6gn\") pod \"mysqld-exporter-0\" (UID: \"bbc90277-5d42-4c6c-a24c-d06f066d4be5\") " pod="openstack/mysqld-exporter-0" Feb 16 11:29:37 crc kubenswrapper[4949]: I0216 11:29:37.082577 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bbc90277-5d42-4c6c-a24c-d06f066d4be5-config-data\") pod \"mysqld-exporter-0\" (UID: \"bbc90277-5d42-4c6c-a24c-d06f066d4be5\") " pod="openstack/mysqld-exporter-0" Feb 16 11:29:37 crc kubenswrapper[4949]: I0216 11:29:37.185436 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gk6gn\" (UniqueName: \"kubernetes.io/projected/bbc90277-5d42-4c6c-a24c-d06f066d4be5-kube-api-access-gk6gn\") pod \"mysqld-exporter-0\" (UID: \"bbc90277-5d42-4c6c-a24c-d06f066d4be5\") " pod="openstack/mysqld-exporter-0" Feb 16 11:29:37 crc kubenswrapper[4949]: I0216 11:29:37.185504 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bbc90277-5d42-4c6c-a24c-d06f066d4be5-config-data\") pod \"mysqld-exporter-0\" (UID: \"bbc90277-5d42-4c6c-a24c-d06f066d4be5\") " pod="openstack/mysqld-exporter-0" Feb 16 11:29:37 crc kubenswrapper[4949]: I0216 11:29:37.185668 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bbc90277-5d42-4c6c-a24c-d06f066d4be5-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"bbc90277-5d42-4c6c-a24c-d06f066d4be5\") " pod="openstack/mysqld-exporter-0" Feb 16 11:29:37 crc kubenswrapper[4949]: I0216 11:29:37.195534 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bbc90277-5d42-4c6c-a24c-d06f066d4be5-config-data\") pod \"mysqld-exporter-0\" (UID: \"bbc90277-5d42-4c6c-a24c-d06f066d4be5\") " pod="openstack/mysqld-exporter-0" Feb 16 11:29:37 crc kubenswrapper[4949]: I0216 11:29:37.195646 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bbc90277-5d42-4c6c-a24c-d06f066d4be5-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"bbc90277-5d42-4c6c-a24c-d06f066d4be5\") " pod="openstack/mysqld-exporter-0" Feb 16 11:29:37 crc kubenswrapper[4949]: I0216 11:29:37.212141 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gk6gn\" (UniqueName: 
\"kubernetes.io/projected/bbc90277-5d42-4c6c-a24c-d06f066d4be5-kube-api-access-gk6gn\") pod \"mysqld-exporter-0\" (UID: \"bbc90277-5d42-4c6c-a24c-d06f066d4be5\") " pod="openstack/mysqld-exporter-0" Feb 16 11:29:37 crc kubenswrapper[4949]: I0216 11:29:37.368035 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0" Feb 16 11:29:37 crc kubenswrapper[4949]: I0216 11:29:37.539998 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b8fafaac-cbaa-4726-91b8-b0739034455f","Type":"ContainerStarted","Data":"741e4783e952b7b09b40204d92351bd32ce252807fefe6211e94d7881d7555c1"} Feb 16 11:29:37 crc kubenswrapper[4949]: I0216 11:29:37.834924 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Feb 16 11:29:37 crc kubenswrapper[4949]: W0216 11:29:37.960614 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbbc90277_5d42_4c6c_a24c_d06f066d4be5.slice/crio-1327864664aebf0fc878c2c636d5f78f234aec401f58df168c4a4dba7505c4ac WatchSource:0}: Error finding container 1327864664aebf0fc878c2c636d5f78f234aec401f58df168c4a4dba7505c4ac: Status 404 returned error can't find the container with id 1327864664aebf0fc878c2c636d5f78f234aec401f58df168c4a4dba7505c4ac Feb 16 11:29:37 crc kubenswrapper[4949]: I0216 11:29:37.973960 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"] Feb 16 11:29:38 crc kubenswrapper[4949]: I0216 11:29:38.551267 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"bbc90277-5d42-4c6c-a24c-d06f066d4be5","Type":"ContainerStarted","Data":"1327864664aebf0fc878c2c636d5f78f234aec401f58df168c4a4dba7505c4ac"} Feb 16 11:29:38 crc kubenswrapper[4949]: I0216 11:29:38.560380 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b8fafaac-cbaa-4726-91b8-b0739034455f","Type":"ContainerStarted","Data":"b3b8242920296794da37f938ea394917744fada8502bf9f25f76c20bef815575"} Feb 16 11:29:40 crc kubenswrapper[4949]: I0216 11:29:40.873708 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="2b4e8478-eec0-499f-a824-b0f07355e4f6" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.128:5671: connect: connection refused" Feb 16 11:29:40 crc kubenswrapper[4949]: I0216 11:29:40.946221 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-1" podUID="2fd90353-44d0-4269-84cc-f90c10eb6da4" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.129:5671: connect: connection refused" Feb 16 11:29:41 crc kubenswrapper[4949]: I0216 11:29:41.116299 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:29:41 crc kubenswrapper[4949]: I0216 11:29:41.120498 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="0f715146-edc4-4f1f-80e3-f134d9833f47" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.131:5671: connect: connection refused" Feb 16 11:29:41 crc kubenswrapper[4949]: I0216 11:29:41.121049 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="0f715146-edc4-4f1f-80e3-f134d9833f47" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.131:5671: 
connect: connection refused" Feb 16 11:29:46 crc kubenswrapper[4949]: I0216 11:29:46.673592 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b8fafaac-cbaa-4726-91b8-b0739034455f","Type":"ContainerStarted","Data":"16e651934c57c63bd459dd3ab5af13d98fcf2a1b5f233a6c2fd1ed2e1268c804"} Feb 16 11:29:47 crc kubenswrapper[4949]: I0216 11:29:47.688042 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"bbc90277-5d42-4c6c-a24c-d06f066d4be5","Type":"ContainerStarted","Data":"916eb3462c426bff05c3868b91bd0c2f0f33e4ebdd77dd91f8983587590944cc"} Feb 16 11:29:47 crc kubenswrapper[4949]: I0216 11:29:47.693916 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-lwh9x" event={"ID":"ffc1202a-a99b-4190-984b-511f9d345832","Type":"ContainerStarted","Data":"21ae4c45d7caa895caba01d65041e6decaba821786efb5f3701696edea47d790"} Feb 16 11:29:47 crc kubenswrapper[4949]: I0216 11:29:47.705857 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b8fafaac-cbaa-4726-91b8-b0739034455f","Type":"ContainerStarted","Data":"bf55a26224027d3224128dc08d756770be19a979cfea200d08bddbb9a9a960f2"} Feb 16 11:29:47 crc kubenswrapper[4949]: I0216 11:29:47.764753 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mysqld-exporter-0" podStartSLOduration=2.6029093210000003 podStartE2EDuration="11.764724397s" podCreationTimestamp="2026-02-16 11:29:36 +0000 UTC" firstStartedPulling="2026-02-16 11:29:37.963907953 +0000 UTC m=+1367.593242118" lastFinishedPulling="2026-02-16 11:29:47.125723029 +0000 UTC m=+1376.755057194" observedRunningTime="2026-02-16 11:29:47.716343086 +0000 UTC m=+1377.345677251" watchObservedRunningTime="2026-02-16 11:29:47.764724397 +0000 UTC m=+1377.394058562" Feb 16 11:29:47 crc kubenswrapper[4949]: I0216 11:29:47.841629 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Feb 16 11:29:47 crc kubenswrapper[4949]: I0216 11:29:47.850140 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-lwh9x" podStartSLOduration=3.739588119 podStartE2EDuration="23.850112174s" podCreationTimestamp="2026-02-16 11:29:24 +0000 UTC" firstStartedPulling="2026-02-16 11:29:26.149899127 +0000 UTC m=+1355.779233292" lastFinishedPulling="2026-02-16 11:29:46.260423182 +0000 UTC m=+1375.889757347" observedRunningTime="2026-02-16 11:29:47.75256185 +0000 UTC m=+1377.381896015" watchObservedRunningTime="2026-02-16 11:29:47.850112174 +0000 UTC m=+1377.479446339" Feb 16 11:29:47 crc kubenswrapper[4949]: I0216 11:29:47.888802 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Feb 16 11:29:48 crc kubenswrapper[4949]: I0216 11:29:48.722991 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b8fafaac-cbaa-4726-91b8-b0739034455f","Type":"ContainerStarted","Data":"b8b8851859dec30687683202c9888c7fce8ee8714bc3c65775e2afccdec59d14"} Feb 16 11:29:48 crc kubenswrapper[4949]: I0216 11:29:48.723328 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b8fafaac-cbaa-4726-91b8-b0739034455f","Type":"ContainerStarted","Data":"c360a54018d55c8be9ff3bbd26232b22df0c115edea68f76e85e3f609fecd6bc"} Feb 16 11:29:48 crc kubenswrapper[4949]: I0216 11:29:48.726025 4949 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Feb 16 11:29:48 crc kubenswrapper[4949]: I0216 11:29:48.770056 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=32.734276416 podStartE2EDuration="42.77003406s" podCreationTimestamp="2026-02-16 11:29:06 +0000 UTC" firstStartedPulling="2026-02-16 11:29:25.760862991 +0000 UTC m=+1355.390197156" lastFinishedPulling="2026-02-16 11:29:35.796620635 +0000 UTC m=+1365.425954800" observedRunningTime="2026-02-16 11:29:48.762217547 +0000 UTC m=+1378.391551702" watchObservedRunningTime="2026-02-16 11:29:48.77003406 +0000 UTC m=+1378.399368225" Feb 16 11:29:49 crc kubenswrapper[4949]: I0216 11:29:49.159367 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-77585f5f8c-pmmbt"] Feb 16 11:29:49 crc kubenswrapper[4949]: I0216 11:29:49.162155 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-77585f5f8c-pmmbt" Feb 16 11:29:49 crc kubenswrapper[4949]: I0216 11:29:49.167074 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Feb 16 11:29:49 crc kubenswrapper[4949]: I0216 11:29:49.172309 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-77585f5f8c-pmmbt"] Feb 16 11:29:49 crc kubenswrapper[4949]: I0216 11:29:49.303913 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/52f0a208-5618-4d66-a2c6-abb21558ad01-ovsdbserver-nb\") pod \"dnsmasq-dns-77585f5f8c-pmmbt\" (UID: \"52f0a208-5618-4d66-a2c6-abb21558ad01\") " pod="openstack/dnsmasq-dns-77585f5f8c-pmmbt" Feb 16 11:29:49 crc kubenswrapper[4949]: I0216 11:29:49.304310 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/52f0a208-5618-4d66-a2c6-abb21558ad01-config\") pod \"dnsmasq-dns-77585f5f8c-pmmbt\" (UID: \"52f0a208-5618-4d66-a2c6-abb21558ad01\") " pod="openstack/dnsmasq-dns-77585f5f8c-pmmbt" Feb 16 11:29:49 crc kubenswrapper[4949]: I0216 11:29:49.305493 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/52f0a208-5618-4d66-a2c6-abb21558ad01-dns-swift-storage-0\") pod \"dnsmasq-dns-77585f5f8c-pmmbt\" (UID: \"52f0a208-5618-4d66-a2c6-abb21558ad01\") " pod="openstack/dnsmasq-dns-77585f5f8c-pmmbt" Feb 16 11:29:49 crc kubenswrapper[4949]: I0216 11:29:49.305955 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/52f0a208-5618-4d66-a2c6-abb21558ad01-dns-svc\") pod \"dnsmasq-dns-77585f5f8c-pmmbt\" (UID: \"52f0a208-5618-4d66-a2c6-abb21558ad01\") " pod="openstack/dnsmasq-dns-77585f5f8c-pmmbt" Feb 16 11:29:49 crc kubenswrapper[4949]: I0216 11:29:49.306216 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/52f0a208-5618-4d66-a2c6-abb21558ad01-ovsdbserver-sb\") pod \"dnsmasq-dns-77585f5f8c-pmmbt\" (UID: \"52f0a208-5618-4d66-a2c6-abb21558ad01\") " pod="openstack/dnsmasq-dns-77585f5f8c-pmmbt" Feb 16 11:29:49 crc kubenswrapper[4949]: I0216 11:29:49.307049 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"kube-api-access-cwkz9\" (UniqueName: \"kubernetes.io/projected/52f0a208-5618-4d66-a2c6-abb21558ad01-kube-api-access-cwkz9\") pod \"dnsmasq-dns-77585f5f8c-pmmbt\" (UID: \"52f0a208-5618-4d66-a2c6-abb21558ad01\") " pod="openstack/dnsmasq-dns-77585f5f8c-pmmbt" Feb 16 11:29:49 crc kubenswrapper[4949]: I0216 11:29:49.409883 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cwkz9\" (UniqueName: \"kubernetes.io/projected/52f0a208-5618-4d66-a2c6-abb21558ad01-kube-api-access-cwkz9\") pod \"dnsmasq-dns-77585f5f8c-pmmbt\" (UID: \"52f0a208-5618-4d66-a2c6-abb21558ad01\") " pod="openstack/dnsmasq-dns-77585f5f8c-pmmbt" Feb 16 11:29:49 crc kubenswrapper[4949]: I0216 11:29:49.410012 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/52f0a208-5618-4d66-a2c6-abb21558ad01-ovsdbserver-nb\") pod \"dnsmasq-dns-77585f5f8c-pmmbt\" (UID: \"52f0a208-5618-4d66-a2c6-abb21558ad01\") " pod="openstack/dnsmasq-dns-77585f5f8c-pmmbt" Feb 16 11:29:49 crc kubenswrapper[4949]: I0216 11:29:49.410043 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/52f0a208-5618-4d66-a2c6-abb21558ad01-config\") pod \"dnsmasq-dns-77585f5f8c-pmmbt\" (UID: \"52f0a208-5618-4d66-a2c6-abb21558ad01\") " pod="openstack/dnsmasq-dns-77585f5f8c-pmmbt" Feb 16 11:29:49 crc kubenswrapper[4949]: I0216 11:29:49.410121 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/52f0a208-5618-4d66-a2c6-abb21558ad01-dns-swift-storage-0\") pod \"dnsmasq-dns-77585f5f8c-pmmbt\" (UID: \"52f0a208-5618-4d66-a2c6-abb21558ad01\") " pod="openstack/dnsmasq-dns-77585f5f8c-pmmbt" Feb 16 11:29:49 crc kubenswrapper[4949]: I0216 11:29:49.410150 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/52f0a208-5618-4d66-a2c6-abb21558ad01-dns-svc\") pod \"dnsmasq-dns-77585f5f8c-pmmbt\" (UID: \"52f0a208-5618-4d66-a2c6-abb21558ad01\") " pod="openstack/dnsmasq-dns-77585f5f8c-pmmbt" Feb 16 11:29:49 crc kubenswrapper[4949]: I0216 11:29:49.410222 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/52f0a208-5618-4d66-a2c6-abb21558ad01-ovsdbserver-sb\") pod \"dnsmasq-dns-77585f5f8c-pmmbt\" (UID: \"52f0a208-5618-4d66-a2c6-abb21558ad01\") " pod="openstack/dnsmasq-dns-77585f5f8c-pmmbt" Feb 16 11:29:49 crc kubenswrapper[4949]: I0216 11:29:49.411124 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/52f0a208-5618-4d66-a2c6-abb21558ad01-ovsdbserver-nb\") pod \"dnsmasq-dns-77585f5f8c-pmmbt\" (UID: \"52f0a208-5618-4d66-a2c6-abb21558ad01\") " pod="openstack/dnsmasq-dns-77585f5f8c-pmmbt" Feb 16 11:29:49 crc kubenswrapper[4949]: I0216 11:29:49.411357 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/52f0a208-5618-4d66-a2c6-abb21558ad01-config\") pod \"dnsmasq-dns-77585f5f8c-pmmbt\" (UID: \"52f0a208-5618-4d66-a2c6-abb21558ad01\") " pod="openstack/dnsmasq-dns-77585f5f8c-pmmbt" Feb 16 11:29:49 crc kubenswrapper[4949]: I0216 11:29:49.411438 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/52f0a208-5618-4d66-a2c6-abb21558ad01-ovsdbserver-sb\") pod \"dnsmasq-dns-77585f5f8c-pmmbt\" (UID: \"52f0a208-5618-4d66-a2c6-abb21558ad01\") " pod="openstack/dnsmasq-dns-77585f5f8c-pmmbt" Feb 16 11:29:49 crc kubenswrapper[4949]: I0216 11:29:49.411670 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/52f0a208-5618-4d66-a2c6-abb21558ad01-dns-swift-storage-0\") pod \"dnsmasq-dns-77585f5f8c-pmmbt\" (UID: \"52f0a208-5618-4d66-a2c6-abb21558ad01\") " pod="openstack/dnsmasq-dns-77585f5f8c-pmmbt" Feb 16 11:29:49 crc kubenswrapper[4949]: I0216 11:29:49.411750 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/52f0a208-5618-4d66-a2c6-abb21558ad01-dns-svc\") pod \"dnsmasq-dns-77585f5f8c-pmmbt\" (UID: \"52f0a208-5618-4d66-a2c6-abb21558ad01\") " pod="openstack/dnsmasq-dns-77585f5f8c-pmmbt" Feb 16 11:29:49 crc kubenswrapper[4949]: I0216 11:29:49.436778 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cwkz9\" (UniqueName: \"kubernetes.io/projected/52f0a208-5618-4d66-a2c6-abb21558ad01-kube-api-access-cwkz9\") pod \"dnsmasq-dns-77585f5f8c-pmmbt\" (UID: \"52f0a208-5618-4d66-a2c6-abb21558ad01\") " pod="openstack/dnsmasq-dns-77585f5f8c-pmmbt" Feb 16 11:29:49 crc kubenswrapper[4949]: I0216 11:29:49.520461 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-77585f5f8c-pmmbt" Feb 16 11:29:50 crc kubenswrapper[4949]: I0216 11:29:50.131229 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-77585f5f8c-pmmbt"] Feb 16 11:29:50 crc kubenswrapper[4949]: W0216 11:29:50.140370 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod52f0a208_5618_4d66_a2c6_abb21558ad01.slice/crio-3c26299456be5e8fdc80454808f0dc4935fc8e1a017cb4afbaea47e78f21a1d9 WatchSource:0}: Error finding container 3c26299456be5e8fdc80454808f0dc4935fc8e1a017cb4afbaea47e78f21a1d9: Status 404 returned error can't find the container with id 3c26299456be5e8fdc80454808f0dc4935fc8e1a017cb4afbaea47e78f21a1d9 Feb 16 11:29:50 crc kubenswrapper[4949]: I0216 11:29:50.747845 4949 generic.go:334] "Generic (PLEG): container finished" podID="52f0a208-5618-4d66-a2c6-abb21558ad01" containerID="74314f219cfdbec6270fe6739719c4ed26c057032c17f30558751abd21010e57" exitCode=0 Feb 16 11:29:50 crc kubenswrapper[4949]: I0216 11:29:50.748383 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77585f5f8c-pmmbt" event={"ID":"52f0a208-5618-4d66-a2c6-abb21558ad01","Type":"ContainerDied","Data":"74314f219cfdbec6270fe6739719c4ed26c057032c17f30558751abd21010e57"} Feb 16 11:29:50 crc kubenswrapper[4949]: I0216 11:29:50.748483 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77585f5f8c-pmmbt" event={"ID":"52f0a208-5618-4d66-a2c6-abb21558ad01","Type":"ContainerStarted","Data":"3c26299456be5e8fdc80454808f0dc4935fc8e1a017cb4afbaea47e78f21a1d9"} Feb 16 11:29:50 crc kubenswrapper[4949]: I0216 11:29:50.873355 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Feb 16 11:29:50 crc kubenswrapper[4949]: I0216 11:29:50.943438 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-1" Feb 16 11:29:51 crc kubenswrapper[4949]: I0216 11:29:51.030261 4949 
prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-2" podUID="4579a2eb-f9a3-4d32-b67a-d76de7f6a97a" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.130:5671: connect: connection refused" Feb 16 11:29:51 crc kubenswrapper[4949]: I0216 11:29:51.122582 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:29:51 crc kubenswrapper[4949]: I0216 11:29:51.701824 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Feb 16 11:29:51 crc kubenswrapper[4949]: I0216 11:29:51.702839 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="9dd62db2-4af9-482c-b9ad-34021e59dae8" containerName="prometheus" containerID="cri-o://c9a4b5d8aa15e598d9c2f85ea3b85e9f0970f9c71a53e01262982fa1a7449218" gracePeriod=600 Feb 16 11:29:51 crc kubenswrapper[4949]: I0216 11:29:51.702899 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="9dd62db2-4af9-482c-b9ad-34021e59dae8" containerName="thanos-sidecar" containerID="cri-o://8777d8e1b0069e26d970e9341adf3c6b26e914131602291d755d47675b7fa4ff" gracePeriod=600 Feb 16 11:29:51 crc kubenswrapper[4949]: I0216 11:29:51.702946 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="9dd62db2-4af9-482c-b9ad-34021e59dae8" containerName="config-reloader" containerID="cri-o://b1586a2ebe8bc4c77bd82b8125c5374baef9f2d70420c2ecdee21fb3eb775a3b" gracePeriod=600 Feb 16 11:29:51 crc kubenswrapper[4949]: I0216 11:29:51.760722 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77585f5f8c-pmmbt" event={"ID":"52f0a208-5618-4d66-a2c6-abb21558ad01","Type":"ContainerStarted","Data":"956e60c906d66049e29c17da43fb37acc8e2cfe5c3093ef56e91262705424217"} Feb 16 11:29:51 crc kubenswrapper[4949]: I0216 11:29:51.760980 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-77585f5f8c-pmmbt" Feb 16 11:29:51 crc kubenswrapper[4949]: I0216 11:29:51.791344 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-77585f5f8c-pmmbt" podStartSLOduration=2.791317593 podStartE2EDuration="2.791317593s" podCreationTimestamp="2026-02-16 11:29:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:29:51.790756407 +0000 UTC m=+1381.420090582" watchObservedRunningTime="2026-02-16 11:29:51.791317593 +0000 UTC m=+1381.420651768" Feb 16 11:29:52 crc kubenswrapper[4949]: I0216 11:29:52.777832 4949 generic.go:334] "Generic (PLEG): container finished" podID="9dd62db2-4af9-482c-b9ad-34021e59dae8" containerID="8777d8e1b0069e26d970e9341adf3c6b26e914131602291d755d47675b7fa4ff" exitCode=0 Feb 16 11:29:52 crc kubenswrapper[4949]: I0216 11:29:52.778284 4949 generic.go:334] "Generic (PLEG): container finished" podID="9dd62db2-4af9-482c-b9ad-34021e59dae8" containerID="b1586a2ebe8bc4c77bd82b8125c5374baef9f2d70420c2ecdee21fb3eb775a3b" exitCode=0 Feb 16 11:29:52 crc kubenswrapper[4949]: I0216 11:29:52.778296 4949 generic.go:334] "Generic (PLEG): container finished" podID="9dd62db2-4af9-482c-b9ad-34021e59dae8" containerID="c9a4b5d8aa15e598d9c2f85ea3b85e9f0970f9c71a53e01262982fa1a7449218" exitCode=0 Feb 16 11:29:52 crc kubenswrapper[4949]: I0216 
11:29:52.777926 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"9dd62db2-4af9-482c-b9ad-34021e59dae8","Type":"ContainerDied","Data":"8777d8e1b0069e26d970e9341adf3c6b26e914131602291d755d47675b7fa4ff"} Feb 16 11:29:52 crc kubenswrapper[4949]: I0216 11:29:52.778553 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"9dd62db2-4af9-482c-b9ad-34021e59dae8","Type":"ContainerDied","Data":"b1586a2ebe8bc4c77bd82b8125c5374baef9f2d70420c2ecdee21fb3eb775a3b"} Feb 16 11:29:52 crc kubenswrapper[4949]: I0216 11:29:52.778590 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"9dd62db2-4af9-482c-b9ad-34021e59dae8","Type":"ContainerDied","Data":"c9a4b5d8aa15e598d9c2f85ea3b85e9f0970f9c71a53e01262982fa1a7449218"} Feb 16 11:29:52 crc kubenswrapper[4949]: I0216 11:29:52.778602 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"9dd62db2-4af9-482c-b9ad-34021e59dae8","Type":"ContainerDied","Data":"d2117446267676f334f6a60075feb4ec2d444f85f1ee0290f5972a69b0282ebc"} Feb 16 11:29:52 crc kubenswrapper[4949]: I0216 11:29:52.778616 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d2117446267676f334f6a60075feb4ec2d444f85f1ee0290f5972a69b0282ebc" Feb 16 11:29:52 crc kubenswrapper[4949]: I0216 11:29:52.805732 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Feb 16 11:29:52 crc kubenswrapper[4949]: I0216 11:29:52.812221 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/9dd62db2-4af9-482c-b9ad-34021e59dae8-prometheus-metric-storage-rulefiles-2\") pod \"9dd62db2-4af9-482c-b9ad-34021e59dae8\" (UID: \"9dd62db2-4af9-482c-b9ad-34021e59dae8\") " Feb 16 11:29:52 crc kubenswrapper[4949]: I0216 11:29:52.812347 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/9dd62db2-4af9-482c-b9ad-34021e59dae8-config\") pod \"9dd62db2-4af9-482c-b9ad-34021e59dae8\" (UID: \"9dd62db2-4af9-482c-b9ad-34021e59dae8\") " Feb 16 11:29:52 crc kubenswrapper[4949]: I0216 11:29:52.812438 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/9dd62db2-4af9-482c-b9ad-34021e59dae8-tls-assets\") pod \"9dd62db2-4af9-482c-b9ad-34021e59dae8\" (UID: \"9dd62db2-4af9-482c-b9ad-34021e59dae8\") " Feb 16 11:29:52 crc kubenswrapper[4949]: I0216 11:29:52.812583 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/9dd62db2-4af9-482c-b9ad-34021e59dae8-thanos-prometheus-http-client-file\") pod \"9dd62db2-4af9-482c-b9ad-34021e59dae8\" (UID: \"9dd62db2-4af9-482c-b9ad-34021e59dae8\") " Feb 16 11:29:52 crc kubenswrapper[4949]: I0216 11:29:52.812665 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/9dd62db2-4af9-482c-b9ad-34021e59dae8-prometheus-metric-storage-rulefiles-1\") pod \"9dd62db2-4af9-482c-b9ad-34021e59dae8\" (UID: \"9dd62db2-4af9-482c-b9ad-34021e59dae8\") " Feb 16 11:29:52 crc kubenswrapper[4949]: I0216 11:29:52.812713 4949 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x9vhk\" (UniqueName: \"kubernetes.io/projected/9dd62db2-4af9-482c-b9ad-34021e59dae8-kube-api-access-x9vhk\") pod \"9dd62db2-4af9-482c-b9ad-34021e59dae8\" (UID: \"9dd62db2-4af9-482c-b9ad-34021e59dae8\") " Feb 16 11:29:52 crc kubenswrapper[4949]: I0216 11:29:52.812712 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9dd62db2-4af9-482c-b9ad-34021e59dae8-prometheus-metric-storage-rulefiles-2" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-2") pod "9dd62db2-4af9-482c-b9ad-34021e59dae8" (UID: "9dd62db2-4af9-482c-b9ad-34021e59dae8"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-2". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:29:52 crc kubenswrapper[4949]: I0216 11:29:52.812951 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-da4ecb59-5c41-42e2-a659-66ef27f33092\") pod \"9dd62db2-4af9-482c-b9ad-34021e59dae8\" (UID: \"9dd62db2-4af9-482c-b9ad-34021e59dae8\") " Feb 16 11:29:52 crc kubenswrapper[4949]: I0216 11:29:52.812985 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/9dd62db2-4af9-482c-b9ad-34021e59dae8-web-config\") pod \"9dd62db2-4af9-482c-b9ad-34021e59dae8\" (UID: \"9dd62db2-4af9-482c-b9ad-34021e59dae8\") " Feb 16 11:29:52 crc kubenswrapper[4949]: I0216 11:29:52.813012 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/9dd62db2-4af9-482c-b9ad-34021e59dae8-config-out\") pod \"9dd62db2-4af9-482c-b9ad-34021e59dae8\" (UID: \"9dd62db2-4af9-482c-b9ad-34021e59dae8\") " Feb 16 11:29:52 crc kubenswrapper[4949]: I0216 11:29:52.813049 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/9dd62db2-4af9-482c-b9ad-34021e59dae8-prometheus-metric-storage-rulefiles-0\") pod \"9dd62db2-4af9-482c-b9ad-34021e59dae8\" (UID: \"9dd62db2-4af9-482c-b9ad-34021e59dae8\") " Feb 16 11:29:52 crc kubenswrapper[4949]: I0216 11:29:52.813339 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9dd62db2-4af9-482c-b9ad-34021e59dae8-prometheus-metric-storage-rulefiles-1" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-1") pod "9dd62db2-4af9-482c-b9ad-34021e59dae8" (UID: "9dd62db2-4af9-482c-b9ad-34021e59dae8"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-1". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:29:52 crc kubenswrapper[4949]: I0216 11:29:52.813799 4949 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/9dd62db2-4af9-482c-b9ad-34021e59dae8-prometheus-metric-storage-rulefiles-2\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:52 crc kubenswrapper[4949]: I0216 11:29:52.813828 4949 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/9dd62db2-4af9-482c-b9ad-34021e59dae8-prometheus-metric-storage-rulefiles-1\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:52 crc kubenswrapper[4949]: I0216 11:29:52.813995 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9dd62db2-4af9-482c-b9ad-34021e59dae8-prometheus-metric-storage-rulefiles-0" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-0") pod "9dd62db2-4af9-482c-b9ad-34021e59dae8" (UID: "9dd62db2-4af9-482c-b9ad-34021e59dae8"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:29:52 crc kubenswrapper[4949]: I0216 11:29:52.820050 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9dd62db2-4af9-482c-b9ad-34021e59dae8-tls-assets" (OuterVolumeSpecName: "tls-assets") pod "9dd62db2-4af9-482c-b9ad-34021e59dae8" (UID: "9dd62db2-4af9-482c-b9ad-34021e59dae8"). InnerVolumeSpecName "tls-assets". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:29:52 crc kubenswrapper[4949]: I0216 11:29:52.822340 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9dd62db2-4af9-482c-b9ad-34021e59dae8-config-out" (OuterVolumeSpecName: "config-out") pod "9dd62db2-4af9-482c-b9ad-34021e59dae8" (UID: "9dd62db2-4af9-482c-b9ad-34021e59dae8"). InnerVolumeSpecName "config-out". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:29:52 crc kubenswrapper[4949]: I0216 11:29:52.822438 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9dd62db2-4af9-482c-b9ad-34021e59dae8-config" (OuterVolumeSpecName: "config") pod "9dd62db2-4af9-482c-b9ad-34021e59dae8" (UID: "9dd62db2-4af9-482c-b9ad-34021e59dae8"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:29:52 crc kubenswrapper[4949]: I0216 11:29:52.870711 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9dd62db2-4af9-482c-b9ad-34021e59dae8-thanos-prometheus-http-client-file" (OuterVolumeSpecName: "thanos-prometheus-http-client-file") pod "9dd62db2-4af9-482c-b9ad-34021e59dae8" (UID: "9dd62db2-4af9-482c-b9ad-34021e59dae8"). InnerVolumeSpecName "thanos-prometheus-http-client-file". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:29:52 crc kubenswrapper[4949]: I0216 11:29:52.871680 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9dd62db2-4af9-482c-b9ad-34021e59dae8-kube-api-access-x9vhk" (OuterVolumeSpecName: "kube-api-access-x9vhk") pod "9dd62db2-4af9-482c-b9ad-34021e59dae8" (UID: "9dd62db2-4af9-482c-b9ad-34021e59dae8"). InnerVolumeSpecName "kube-api-access-x9vhk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:29:52 crc kubenswrapper[4949]: I0216 11:29:52.872074 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-da4ecb59-5c41-42e2-a659-66ef27f33092" (OuterVolumeSpecName: "prometheus-metric-storage-db") pod "9dd62db2-4af9-482c-b9ad-34021e59dae8" (UID: "9dd62db2-4af9-482c-b9ad-34021e59dae8"). InnerVolumeSpecName "pvc-da4ecb59-5c41-42e2-a659-66ef27f33092". PluginName "kubernetes.io/csi", VolumeGidValue "" Feb 16 11:29:52 crc kubenswrapper[4949]: I0216 11:29:52.909498 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9dd62db2-4af9-482c-b9ad-34021e59dae8-web-config" (OuterVolumeSpecName: "web-config") pod "9dd62db2-4af9-482c-b9ad-34021e59dae8" (UID: "9dd62db2-4af9-482c-b9ad-34021e59dae8"). InnerVolumeSpecName "web-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:29:52 crc kubenswrapper[4949]: I0216 11:29:52.916263 4949 reconciler_common.go:293] "Volume detached for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/9dd62db2-4af9-482c-b9ad-34021e59dae8-thanos-prometheus-http-client-file\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:52 crc kubenswrapper[4949]: I0216 11:29:52.916297 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x9vhk\" (UniqueName: \"kubernetes.io/projected/9dd62db2-4af9-482c-b9ad-34021e59dae8-kube-api-access-x9vhk\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:52 crc kubenswrapper[4949]: I0216 11:29:52.916337 4949 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-da4ecb59-5c41-42e2-a659-66ef27f33092\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-da4ecb59-5c41-42e2-a659-66ef27f33092\") on node \"crc\" " Feb 16 11:29:52 crc kubenswrapper[4949]: I0216 11:29:52.916351 4949 reconciler_common.go:293] "Volume detached for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/9dd62db2-4af9-482c-b9ad-34021e59dae8-web-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:52 crc kubenswrapper[4949]: I0216 11:29:52.916364 4949 reconciler_common.go:293] "Volume detached for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/9dd62db2-4af9-482c-b9ad-34021e59dae8-config-out\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:52 crc kubenswrapper[4949]: I0216 11:29:52.916375 4949 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/9dd62db2-4af9-482c-b9ad-34021e59dae8-prometheus-metric-storage-rulefiles-0\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:52 crc kubenswrapper[4949]: I0216 11:29:52.916384 4949 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/9dd62db2-4af9-482c-b9ad-34021e59dae8-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:52 crc kubenswrapper[4949]: I0216 11:29:52.916393 4949 reconciler_common.go:293] "Volume detached for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/9dd62db2-4af9-482c-b9ad-34021e59dae8-tls-assets\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:52 crc kubenswrapper[4949]: I0216 11:29:52.975398 4949 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
Feb 16 11:29:52 crc kubenswrapper[4949]: I0216 11:29:52.975581 4949 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-da4ecb59-5c41-42e2-a659-66ef27f33092" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-da4ecb59-5c41-42e2-a659-66ef27f33092") on node "crc" Feb 16 11:29:53 crc kubenswrapper[4949]: I0216 11:29:53.018556 4949 reconciler_common.go:293] "Volume detached for volume \"pvc-da4ecb59-5c41-42e2-a659-66ef27f33092\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-da4ecb59-5c41-42e2-a659-66ef27f33092\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:53 crc kubenswrapper[4949]: I0216 11:29:53.788599 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Feb 16 11:29:53 crc kubenswrapper[4949]: I0216 11:29:53.822292 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Feb 16 11:29:53 crc kubenswrapper[4949]: I0216 11:29:53.840267 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/prometheus-metric-storage-0"] Feb 16 11:29:53 crc kubenswrapper[4949]: I0216 11:29:53.861025 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Feb 16 11:29:53 crc kubenswrapper[4949]: E0216 11:29:53.861678 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9dd62db2-4af9-482c-b9ad-34021e59dae8" containerName="config-reloader" Feb 16 11:29:53 crc kubenswrapper[4949]: I0216 11:29:53.861697 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="9dd62db2-4af9-482c-b9ad-34021e59dae8" containerName="config-reloader" Feb 16 11:29:53 crc kubenswrapper[4949]: E0216 11:29:53.861711 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9dd62db2-4af9-482c-b9ad-34021e59dae8" containerName="init-config-reloader" Feb 16 11:29:53 crc kubenswrapper[4949]: I0216 11:29:53.861718 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="9dd62db2-4af9-482c-b9ad-34021e59dae8" containerName="init-config-reloader" Feb 16 11:29:53 crc kubenswrapper[4949]: E0216 11:29:53.861731 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9dd62db2-4af9-482c-b9ad-34021e59dae8" containerName="prometheus" Feb 16 11:29:53 crc kubenswrapper[4949]: I0216 11:29:53.861738 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="9dd62db2-4af9-482c-b9ad-34021e59dae8" containerName="prometheus" Feb 16 11:29:53 crc kubenswrapper[4949]: E0216 11:29:53.861760 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9dd62db2-4af9-482c-b9ad-34021e59dae8" containerName="thanos-sidecar" Feb 16 11:29:53 crc kubenswrapper[4949]: I0216 11:29:53.861767 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="9dd62db2-4af9-482c-b9ad-34021e59dae8" containerName="thanos-sidecar" Feb 16 11:29:53 crc kubenswrapper[4949]: I0216 11:29:53.861969 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="9dd62db2-4af9-482c-b9ad-34021e59dae8" containerName="prometheus" Feb 16 11:29:53 crc kubenswrapper[4949]: I0216 11:29:53.861991 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="9dd62db2-4af9-482c-b9ad-34021e59dae8" containerName="config-reloader" Feb 16 11:29:53 crc kubenswrapper[4949]: I0216 11:29:53.862005 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="9dd62db2-4af9-482c-b9ad-34021e59dae8" containerName="thanos-sidecar" Feb 16 11:29:53 crc kubenswrapper[4949]: I0216 11:29:53.863873 4949 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Feb 16 11:29:53 crc kubenswrapper[4949]: I0216 11:29:53.867160 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Feb 16 11:29:53 crc kubenswrapper[4949]: I0216 11:29:53.867181 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-1" Feb 16 11:29:53 crc kubenswrapper[4949]: I0216 11:29:53.867601 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-metric-storage-prometheus-svc" Feb 16 11:29:53 crc kubenswrapper[4949]: I0216 11:29:53.867815 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Feb 16 11:29:53 crc kubenswrapper[4949]: I0216 11:29:53.867858 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-s6fl4" Feb 16 11:29:53 crc kubenswrapper[4949]: I0216 11:29:53.869647 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-2" Feb 16 11:29:53 crc kubenswrapper[4949]: I0216 11:29:53.870426 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Feb 16 11:29:53 crc kubenswrapper[4949]: I0216 11:29:53.870566 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Feb 16 11:29:53 crc kubenswrapper[4949]: I0216 11:29:53.879788 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Feb 16 11:29:53 crc kubenswrapper[4949]: I0216 11:29:53.883422 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Feb 16 11:29:53 crc kubenswrapper[4949]: I0216 11:29:53.941601 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/e8565369-4065-464b-8f76-56b3689744e9-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"e8565369-4065-464b-8f76-56b3689744e9\") " pod="openstack/prometheus-metric-storage-0" Feb 16 11:29:53 crc kubenswrapper[4949]: I0216 11:29:53.941679 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/e8565369-4065-464b-8f76-56b3689744e9-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"e8565369-4065-464b-8f76-56b3689744e9\") " pod="openstack/prometheus-metric-storage-0" Feb 16 11:29:53 crc kubenswrapper[4949]: I0216 11:29:53.941740 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/e8565369-4065-464b-8f76-56b3689744e9-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"e8565369-4065-464b-8f76-56b3689744e9\") " pod="openstack/prometheus-metric-storage-0" Feb 16 11:29:53 crc kubenswrapper[4949]: I0216 11:29:53.941775 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: 
\"kubernetes.io/projected/e8565369-4065-464b-8f76-56b3689744e9-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"e8565369-4065-464b-8f76-56b3689744e9\") " pod="openstack/prometheus-metric-storage-0" Feb 16 11:29:53 crc kubenswrapper[4949]: I0216 11:29:53.941807 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2plxz\" (UniqueName: \"kubernetes.io/projected/e8565369-4065-464b-8f76-56b3689744e9-kube-api-access-2plxz\") pod \"prometheus-metric-storage-0\" (UID: \"e8565369-4065-464b-8f76-56b3689744e9\") " pod="openstack/prometheus-metric-storage-0" Feb 16 11:29:53 crc kubenswrapper[4949]: I0216 11:29:53.941910 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/e8565369-4065-464b-8f76-56b3689744e9-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"e8565369-4065-464b-8f76-56b3689744e9\") " pod="openstack/prometheus-metric-storage-0" Feb 16 11:29:53 crc kubenswrapper[4949]: I0216 11:29:53.941939 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8565369-4065-464b-8f76-56b3689744e9-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"e8565369-4065-464b-8f76-56b3689744e9\") " pod="openstack/prometheus-metric-storage-0" Feb 16 11:29:53 crc kubenswrapper[4949]: I0216 11:29:53.942052 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/e8565369-4065-464b-8f76-56b3689744e9-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"e8565369-4065-464b-8f76-56b3689744e9\") " pod="openstack/prometheus-metric-storage-0" Feb 16 11:29:53 crc kubenswrapper[4949]: I0216 11:29:53.942100 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/e8565369-4065-464b-8f76-56b3689744e9-config\") pod \"prometheus-metric-storage-0\" (UID: \"e8565369-4065-464b-8f76-56b3689744e9\") " pod="openstack/prometheus-metric-storage-0" Feb 16 11:29:53 crc kubenswrapper[4949]: I0216 11:29:53.942136 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/e8565369-4065-464b-8f76-56b3689744e9-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"e8565369-4065-464b-8f76-56b3689744e9\") " pod="openstack/prometheus-metric-storage-0" Feb 16 11:29:53 crc kubenswrapper[4949]: I0216 11:29:53.942185 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/e8565369-4065-464b-8f76-56b3689744e9-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"e8565369-4065-464b-8f76-56b3689744e9\") " pod="openstack/prometheus-metric-storage-0" Feb 16 11:29:53 crc kubenswrapper[4949]: I0216 11:29:53.942212 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/e8565369-4065-464b-8f76-56b3689744e9-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: 
\"e8565369-4065-464b-8f76-56b3689744e9\") " pod="openstack/prometheus-metric-storage-0" Feb 16 11:29:53 crc kubenswrapper[4949]: I0216 11:29:53.942253 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-da4ecb59-5c41-42e2-a659-66ef27f33092\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-da4ecb59-5c41-42e2-a659-66ef27f33092\") pod \"prometheus-metric-storage-0\" (UID: \"e8565369-4065-464b-8f76-56b3689744e9\") " pod="openstack/prometheus-metric-storage-0" Feb 16 11:29:54 crc kubenswrapper[4949]: I0216 11:29:54.044411 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/e8565369-4065-464b-8f76-56b3689744e9-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"e8565369-4065-464b-8f76-56b3689744e9\") " pod="openstack/prometheus-metric-storage-0" Feb 16 11:29:54 crc kubenswrapper[4949]: I0216 11:29:54.044478 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8565369-4065-464b-8f76-56b3689744e9-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"e8565369-4065-464b-8f76-56b3689744e9\") " pod="openstack/prometheus-metric-storage-0" Feb 16 11:29:54 crc kubenswrapper[4949]: I0216 11:29:54.044575 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/e8565369-4065-464b-8f76-56b3689744e9-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"e8565369-4065-464b-8f76-56b3689744e9\") " pod="openstack/prometheus-metric-storage-0" Feb 16 11:29:54 crc kubenswrapper[4949]: I0216 11:29:54.044616 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/e8565369-4065-464b-8f76-56b3689744e9-config\") pod \"prometheus-metric-storage-0\" (UID: \"e8565369-4065-464b-8f76-56b3689744e9\") " pod="openstack/prometheus-metric-storage-0" Feb 16 11:29:54 crc kubenswrapper[4949]: I0216 11:29:54.044655 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/e8565369-4065-464b-8f76-56b3689744e9-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"e8565369-4065-464b-8f76-56b3689744e9\") " pod="openstack/prometheus-metric-storage-0" Feb 16 11:29:54 crc kubenswrapper[4949]: I0216 11:29:54.044731 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/e8565369-4065-464b-8f76-56b3689744e9-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"e8565369-4065-464b-8f76-56b3689744e9\") " pod="openstack/prometheus-metric-storage-0" Feb 16 11:29:54 crc kubenswrapper[4949]: I0216 11:29:54.044763 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/e8565369-4065-464b-8f76-56b3689744e9-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"e8565369-4065-464b-8f76-56b3689744e9\") " pod="openstack/prometheus-metric-storage-0" Feb 16 11:29:54 crc kubenswrapper[4949]: I0216 11:29:54.044810 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"pvc-da4ecb59-5c41-42e2-a659-66ef27f33092\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-da4ecb59-5c41-42e2-a659-66ef27f33092\") pod \"prometheus-metric-storage-0\" (UID: \"e8565369-4065-464b-8f76-56b3689744e9\") " pod="openstack/prometheus-metric-storage-0" Feb 16 11:29:54 crc kubenswrapper[4949]: I0216 11:29:54.044868 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/e8565369-4065-464b-8f76-56b3689744e9-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"e8565369-4065-464b-8f76-56b3689744e9\") " pod="openstack/prometheus-metric-storage-0" Feb 16 11:29:54 crc kubenswrapper[4949]: I0216 11:29:54.044928 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/e8565369-4065-464b-8f76-56b3689744e9-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"e8565369-4065-464b-8f76-56b3689744e9\") " pod="openstack/prometheus-metric-storage-0" Feb 16 11:29:54 crc kubenswrapper[4949]: I0216 11:29:54.044980 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/e8565369-4065-464b-8f76-56b3689744e9-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"e8565369-4065-464b-8f76-56b3689744e9\") " pod="openstack/prometheus-metric-storage-0" Feb 16 11:29:54 crc kubenswrapper[4949]: I0216 11:29:54.045009 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/e8565369-4065-464b-8f76-56b3689744e9-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"e8565369-4065-464b-8f76-56b3689744e9\") " pod="openstack/prometheus-metric-storage-0" Feb 16 11:29:54 crc kubenswrapper[4949]: I0216 11:29:54.045034 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2plxz\" (UniqueName: \"kubernetes.io/projected/e8565369-4065-464b-8f76-56b3689744e9-kube-api-access-2plxz\") pod \"prometheus-metric-storage-0\" (UID: \"e8565369-4065-464b-8f76-56b3689744e9\") " pod="openstack/prometheus-metric-storage-0" Feb 16 11:29:54 crc kubenswrapper[4949]: I0216 11:29:54.046631 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/e8565369-4065-464b-8f76-56b3689744e9-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"e8565369-4065-464b-8f76-56b3689744e9\") " pod="openstack/prometheus-metric-storage-0" Feb 16 11:29:54 crc kubenswrapper[4949]: I0216 11:29:54.047989 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/e8565369-4065-464b-8f76-56b3689744e9-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"e8565369-4065-464b-8f76-56b3689744e9\") " pod="openstack/prometheus-metric-storage-0" Feb 16 11:29:54 crc kubenswrapper[4949]: I0216 11:29:54.048291 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: 
\"kubernetes.io/configmap/e8565369-4065-464b-8f76-56b3689744e9-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"e8565369-4065-464b-8f76-56b3689744e9\") " pod="openstack/prometheus-metric-storage-0" Feb 16 11:29:54 crc kubenswrapper[4949]: I0216 11:29:54.052401 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8565369-4065-464b-8f76-56b3689744e9-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"e8565369-4065-464b-8f76-56b3689744e9\") " pod="openstack/prometheus-metric-storage-0" Feb 16 11:29:54 crc kubenswrapper[4949]: I0216 11:29:54.053438 4949 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Feb 16 11:29:54 crc kubenswrapper[4949]: I0216 11:29:54.053463 4949 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-da4ecb59-5c41-42e2-a659-66ef27f33092\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-da4ecb59-5c41-42e2-a659-66ef27f33092\") pod \"prometheus-metric-storage-0\" (UID: \"e8565369-4065-464b-8f76-56b3689744e9\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/46dd49080b3b962fa6659badf9039db434c1b5170bb8043ef62ce273d0522d39/globalmount\"" pod="openstack/prometheus-metric-storage-0" Feb 16 11:29:54 crc kubenswrapper[4949]: I0216 11:29:54.054802 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/e8565369-4065-464b-8f76-56b3689744e9-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"e8565369-4065-464b-8f76-56b3689744e9\") " pod="openstack/prometheus-metric-storage-0" Feb 16 11:29:54 crc kubenswrapper[4949]: I0216 11:29:54.059387 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/e8565369-4065-464b-8f76-56b3689744e9-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"e8565369-4065-464b-8f76-56b3689744e9\") " pod="openstack/prometheus-metric-storage-0" Feb 16 11:29:54 crc kubenswrapper[4949]: I0216 11:29:54.059833 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/e8565369-4065-464b-8f76-56b3689744e9-config\") pod \"prometheus-metric-storage-0\" (UID: \"e8565369-4065-464b-8f76-56b3689744e9\") " pod="openstack/prometheus-metric-storage-0" Feb 16 11:29:54 crc kubenswrapper[4949]: I0216 11:29:54.061505 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/e8565369-4065-464b-8f76-56b3689744e9-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"e8565369-4065-464b-8f76-56b3689744e9\") " pod="openstack/prometheus-metric-storage-0" Feb 16 11:29:54 crc kubenswrapper[4949]: I0216 11:29:54.062044 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/e8565369-4065-464b-8f76-56b3689744e9-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"e8565369-4065-464b-8f76-56b3689744e9\") " pod="openstack/prometheus-metric-storage-0" Feb 16 11:29:54 crc kubenswrapper[4949]: I0216 11:29:54.063776 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: 
\"kubernetes.io/secret/e8565369-4065-464b-8f76-56b3689744e9-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"e8565369-4065-464b-8f76-56b3689744e9\") " pod="openstack/prometheus-metric-storage-0" Feb 16 11:29:54 crc kubenswrapper[4949]: I0216 11:29:54.065596 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/e8565369-4065-464b-8f76-56b3689744e9-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"e8565369-4065-464b-8f76-56b3689744e9\") " pod="openstack/prometheus-metric-storage-0" Feb 16 11:29:54 crc kubenswrapper[4949]: I0216 11:29:54.079236 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2plxz\" (UniqueName: \"kubernetes.io/projected/e8565369-4065-464b-8f76-56b3689744e9-kube-api-access-2plxz\") pod \"prometheus-metric-storage-0\" (UID: \"e8565369-4065-464b-8f76-56b3689744e9\") " pod="openstack/prometheus-metric-storage-0" Feb 16 11:29:54 crc kubenswrapper[4949]: I0216 11:29:54.113496 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-da4ecb59-5c41-42e2-a659-66ef27f33092\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-da4ecb59-5c41-42e2-a659-66ef27f33092\") pod \"prometheus-metric-storage-0\" (UID: \"e8565369-4065-464b-8f76-56b3689744e9\") " pod="openstack/prometheus-metric-storage-0" Feb 16 11:29:54 crc kubenswrapper[4949]: I0216 11:29:54.189630 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Feb 16 11:29:54 crc kubenswrapper[4949]: W0216 11:29:54.724090 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode8565369_4065_464b_8f76_56b3689744e9.slice/crio-609ca3723d4c1c6858d0828189290cad4d33a7f5b2bb14010192d31cc0feb018 WatchSource:0}: Error finding container 609ca3723d4c1c6858d0828189290cad4d33a7f5b2bb14010192d31cc0feb018: Status 404 returned error can't find the container with id 609ca3723d4c1c6858d0828189290cad4d33a7f5b2bb14010192d31cc0feb018 Feb 16 11:29:54 crc kubenswrapper[4949]: I0216 11:29:54.726961 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Feb 16 11:29:54 crc kubenswrapper[4949]: I0216 11:29:54.801987 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"e8565369-4065-464b-8f76-56b3689744e9","Type":"ContainerStarted","Data":"609ca3723d4c1c6858d0828189290cad4d33a7f5b2bb14010192d31cc0feb018"} Feb 16 11:29:55 crc kubenswrapper[4949]: I0216 11:29:55.261495 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9dd62db2-4af9-482c-b9ad-34021e59dae8" path="/var/lib/kubelet/pods/9dd62db2-4af9-482c-b9ad-34021e59dae8/volumes" Feb 16 11:29:56 crc kubenswrapper[4949]: I0216 11:29:56.825125 4949 generic.go:334] "Generic (PLEG): container finished" podID="ffc1202a-a99b-4190-984b-511f9d345832" containerID="21ae4c45d7caa895caba01d65041e6decaba821786efb5f3701696edea47d790" exitCode=0 Feb 16 11:29:56 crc kubenswrapper[4949]: I0216 11:29:56.825262 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-lwh9x" event={"ID":"ffc1202a-a99b-4190-984b-511f9d345832","Type":"ContainerDied","Data":"21ae4c45d7caa895caba01d65041e6decaba821786efb5f3701696edea47d790"} 
Feb 16 11:29:58 crc kubenswrapper[4949]: I0216 11:29:58.393223 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-lwh9x" Feb 16 11:29:58 crc kubenswrapper[4949]: I0216 11:29:58.552656 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffc1202a-a99b-4190-984b-511f9d345832-config-data\") pod \"ffc1202a-a99b-4190-984b-511f9d345832\" (UID: \"ffc1202a-a99b-4190-984b-511f9d345832\") " Feb 16 11:29:58 crc kubenswrapper[4949]: I0216 11:29:58.552802 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xwgzw\" (UniqueName: \"kubernetes.io/projected/ffc1202a-a99b-4190-984b-511f9d345832-kube-api-access-xwgzw\") pod \"ffc1202a-a99b-4190-984b-511f9d345832\" (UID: \"ffc1202a-a99b-4190-984b-511f9d345832\") " Feb 16 11:29:58 crc kubenswrapper[4949]: I0216 11:29:58.552873 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffc1202a-a99b-4190-984b-511f9d345832-combined-ca-bundle\") pod \"ffc1202a-a99b-4190-984b-511f9d345832\" (UID: \"ffc1202a-a99b-4190-984b-511f9d345832\") " Feb 16 11:29:58 crc kubenswrapper[4949]: I0216 11:29:58.552972 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ffc1202a-a99b-4190-984b-511f9d345832-db-sync-config-data\") pod \"ffc1202a-a99b-4190-984b-511f9d345832\" (UID: \"ffc1202a-a99b-4190-984b-511f9d345832\") " Feb 16 11:29:58 crc kubenswrapper[4949]: I0216 11:29:58.559799 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ffc1202a-a99b-4190-984b-511f9d345832-kube-api-access-xwgzw" (OuterVolumeSpecName: "kube-api-access-xwgzw") pod "ffc1202a-a99b-4190-984b-511f9d345832" (UID: "ffc1202a-a99b-4190-984b-511f9d345832"). InnerVolumeSpecName "kube-api-access-xwgzw". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:29:58 crc kubenswrapper[4949]: I0216 11:29:58.560611 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ffc1202a-a99b-4190-984b-511f9d345832-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "ffc1202a-a99b-4190-984b-511f9d345832" (UID: "ffc1202a-a99b-4190-984b-511f9d345832"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:29:58 crc kubenswrapper[4949]: I0216 11:29:58.584720 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ffc1202a-a99b-4190-984b-511f9d345832-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ffc1202a-a99b-4190-984b-511f9d345832" (UID: "ffc1202a-a99b-4190-984b-511f9d345832"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:29:58 crc kubenswrapper[4949]: I0216 11:29:58.620130 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ffc1202a-a99b-4190-984b-511f9d345832-config-data" (OuterVolumeSpecName: "config-data") pod "ffc1202a-a99b-4190-984b-511f9d345832" (UID: "ffc1202a-a99b-4190-984b-511f9d345832"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:29:58 crc kubenswrapper[4949]: I0216 11:29:58.656508 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffc1202a-a99b-4190-984b-511f9d345832-config-data\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:58 crc kubenswrapper[4949]: I0216 11:29:58.656753 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xwgzw\" (UniqueName: \"kubernetes.io/projected/ffc1202a-a99b-4190-984b-511f9d345832-kube-api-access-xwgzw\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:58 crc kubenswrapper[4949]: I0216 11:29:58.656765 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffc1202a-a99b-4190-984b-511f9d345832-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:58 crc kubenswrapper[4949]: I0216 11:29:58.656775 4949 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ffc1202a-a99b-4190-984b-511f9d345832-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Feb 16 11:29:58 crc kubenswrapper[4949]: I0216 11:29:58.848893 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-lwh9x" event={"ID":"ffc1202a-a99b-4190-984b-511f9d345832","Type":"ContainerDied","Data":"80a9324ef8dc16abcdef2282ed1a48ac429a18fe89b5698ae179a236b5a8eb2e"} Feb 16 11:29:58 crc kubenswrapper[4949]: I0216 11:29:58.848944 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="80a9324ef8dc16abcdef2282ed1a48ac429a18fe89b5698ae179a236b5a8eb2e" Feb 16 11:29:58 crc kubenswrapper[4949]: I0216 11:29:58.849030 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-lwh9x" Feb 16 11:29:58 crc kubenswrapper[4949]: I0216 11:29:58.851276 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"e8565369-4065-464b-8f76-56b3689744e9","Type":"ContainerStarted","Data":"b1a57639c8bb360cec15ae38aa79cd0d6e8ecbcf1ff88db78f0698efc1b63498"} Feb 16 11:29:59 crc kubenswrapper[4949]: I0216 11:29:59.537863 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-77585f5f8c-pmmbt" Feb 16 11:29:59 crc kubenswrapper[4949]: I0216 11:29:59.651714 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-77585f5f8c-pmmbt"] Feb 16 11:29:59 crc kubenswrapper[4949]: I0216 11:29:59.708560 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7ff5475cc9-wl8jh"] Feb 16 11:29:59 crc kubenswrapper[4949]: E0216 11:29:59.709301 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffc1202a-a99b-4190-984b-511f9d345832" containerName="glance-db-sync" Feb 16 11:29:59 crc kubenswrapper[4949]: I0216 11:29:59.709325 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffc1202a-a99b-4190-984b-511f9d345832" containerName="glance-db-sync" Feb 16 11:29:59 crc kubenswrapper[4949]: I0216 11:29:59.709616 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="ffc1202a-a99b-4190-984b-511f9d345832" containerName="glance-db-sync" Feb 16 11:29:59 crc kubenswrapper[4949]: I0216 11:29:59.711070 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7ff5475cc9-wl8jh" Feb 16 11:29:59 crc kubenswrapper[4949]: I0216 11:29:59.720387 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7ff5475cc9-wl8jh"] Feb 16 11:29:59 crc kubenswrapper[4949]: I0216 11:29:59.768436 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6c60c38e-2850-474f-85f8-2bd436299ebe-dns-swift-storage-0\") pod \"dnsmasq-dns-7ff5475cc9-wl8jh\" (UID: \"6c60c38e-2850-474f-85f8-2bd436299ebe\") " pod="openstack/dnsmasq-dns-7ff5475cc9-wl8jh" Feb 16 11:29:59 crc kubenswrapper[4949]: I0216 11:29:59.768533 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6c60c38e-2850-474f-85f8-2bd436299ebe-dns-svc\") pod \"dnsmasq-dns-7ff5475cc9-wl8jh\" (UID: \"6c60c38e-2850-474f-85f8-2bd436299ebe\") " pod="openstack/dnsmasq-dns-7ff5475cc9-wl8jh" Feb 16 11:29:59 crc kubenswrapper[4949]: I0216 11:29:59.768583 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6c60c38e-2850-474f-85f8-2bd436299ebe-ovsdbserver-sb\") pod \"dnsmasq-dns-7ff5475cc9-wl8jh\" (UID: \"6c60c38e-2850-474f-85f8-2bd436299ebe\") " pod="openstack/dnsmasq-dns-7ff5475cc9-wl8jh" Feb 16 11:29:59 crc kubenswrapper[4949]: I0216 11:29:59.768724 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6c60c38e-2850-474f-85f8-2bd436299ebe-ovsdbserver-nb\") pod \"dnsmasq-dns-7ff5475cc9-wl8jh\" (UID: \"6c60c38e-2850-474f-85f8-2bd436299ebe\") " pod="openstack/dnsmasq-dns-7ff5475cc9-wl8jh" Feb 16 11:29:59 crc kubenswrapper[4949]: I0216 11:29:59.768758 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c60c38e-2850-474f-85f8-2bd436299ebe-config\") pod \"dnsmasq-dns-7ff5475cc9-wl8jh\" (UID: \"6c60c38e-2850-474f-85f8-2bd436299ebe\") " pod="openstack/dnsmasq-dns-7ff5475cc9-wl8jh" Feb 16 11:29:59 crc kubenswrapper[4949]: I0216 11:29:59.768800 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qh2m8\" (UniqueName: \"kubernetes.io/projected/6c60c38e-2850-474f-85f8-2bd436299ebe-kube-api-access-qh2m8\") pod \"dnsmasq-dns-7ff5475cc9-wl8jh\" (UID: \"6c60c38e-2850-474f-85f8-2bd436299ebe\") " pod="openstack/dnsmasq-dns-7ff5475cc9-wl8jh" Feb 16 11:29:59 crc kubenswrapper[4949]: I0216 11:29:59.871858 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6c60c38e-2850-474f-85f8-2bd436299ebe-dns-svc\") pod \"dnsmasq-dns-7ff5475cc9-wl8jh\" (UID: \"6c60c38e-2850-474f-85f8-2bd436299ebe\") " pod="openstack/dnsmasq-dns-7ff5475cc9-wl8jh" Feb 16 11:29:59 crc kubenswrapper[4949]: I0216 11:29:59.871936 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6c60c38e-2850-474f-85f8-2bd436299ebe-ovsdbserver-sb\") pod \"dnsmasq-dns-7ff5475cc9-wl8jh\" (UID: \"6c60c38e-2850-474f-85f8-2bd436299ebe\") " pod="openstack/dnsmasq-dns-7ff5475cc9-wl8jh" Feb 16 11:29:59 crc kubenswrapper[4949]: I0216 11:29:59.871970 4949 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6c60c38e-2850-474f-85f8-2bd436299ebe-ovsdbserver-nb\") pod \"dnsmasq-dns-7ff5475cc9-wl8jh\" (UID: \"6c60c38e-2850-474f-85f8-2bd436299ebe\") " pod="openstack/dnsmasq-dns-7ff5475cc9-wl8jh" Feb 16 11:29:59 crc kubenswrapper[4949]: I0216 11:29:59.872004 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c60c38e-2850-474f-85f8-2bd436299ebe-config\") pod \"dnsmasq-dns-7ff5475cc9-wl8jh\" (UID: \"6c60c38e-2850-474f-85f8-2bd436299ebe\") " pod="openstack/dnsmasq-dns-7ff5475cc9-wl8jh" Feb 16 11:29:59 crc kubenswrapper[4949]: I0216 11:29:59.872045 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qh2m8\" (UniqueName: \"kubernetes.io/projected/6c60c38e-2850-474f-85f8-2bd436299ebe-kube-api-access-qh2m8\") pod \"dnsmasq-dns-7ff5475cc9-wl8jh\" (UID: \"6c60c38e-2850-474f-85f8-2bd436299ebe\") " pod="openstack/dnsmasq-dns-7ff5475cc9-wl8jh" Feb 16 11:29:59 crc kubenswrapper[4949]: I0216 11:29:59.872126 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6c60c38e-2850-474f-85f8-2bd436299ebe-dns-swift-storage-0\") pod \"dnsmasq-dns-7ff5475cc9-wl8jh\" (UID: \"6c60c38e-2850-474f-85f8-2bd436299ebe\") " pod="openstack/dnsmasq-dns-7ff5475cc9-wl8jh" Feb 16 11:29:59 crc kubenswrapper[4949]: I0216 11:29:59.873153 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6c60c38e-2850-474f-85f8-2bd436299ebe-dns-swift-storage-0\") pod \"dnsmasq-dns-7ff5475cc9-wl8jh\" (UID: \"6c60c38e-2850-474f-85f8-2bd436299ebe\") " pod="openstack/dnsmasq-dns-7ff5475cc9-wl8jh" Feb 16 11:29:59 crc kubenswrapper[4949]: I0216 11:29:59.873287 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6c60c38e-2850-474f-85f8-2bd436299ebe-ovsdbserver-nb\") pod \"dnsmasq-dns-7ff5475cc9-wl8jh\" (UID: \"6c60c38e-2850-474f-85f8-2bd436299ebe\") " pod="openstack/dnsmasq-dns-7ff5475cc9-wl8jh" Feb 16 11:29:59 crc kubenswrapper[4949]: I0216 11:29:59.873546 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c60c38e-2850-474f-85f8-2bd436299ebe-config\") pod \"dnsmasq-dns-7ff5475cc9-wl8jh\" (UID: \"6c60c38e-2850-474f-85f8-2bd436299ebe\") " pod="openstack/dnsmasq-dns-7ff5475cc9-wl8jh" Feb 16 11:29:59 crc kubenswrapper[4949]: I0216 11:29:59.876406 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6c60c38e-2850-474f-85f8-2bd436299ebe-ovsdbserver-sb\") pod \"dnsmasq-dns-7ff5475cc9-wl8jh\" (UID: \"6c60c38e-2850-474f-85f8-2bd436299ebe\") " pod="openstack/dnsmasq-dns-7ff5475cc9-wl8jh" Feb 16 11:29:59 crc kubenswrapper[4949]: I0216 11:29:59.876862 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-77585f5f8c-pmmbt" podUID="52f0a208-5618-4d66-a2c6-abb21558ad01" containerName="dnsmasq-dns" containerID="cri-o://956e60c906d66049e29c17da43fb37acc8e2cfe5c3093ef56e91262705424217" gracePeriod=10 Feb 16 11:29:59 crc kubenswrapper[4949]: I0216 11:29:59.877454 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/6c60c38e-2850-474f-85f8-2bd436299ebe-dns-svc\") pod \"dnsmasq-dns-7ff5475cc9-wl8jh\" (UID: \"6c60c38e-2850-474f-85f8-2bd436299ebe\") " pod="openstack/dnsmasq-dns-7ff5475cc9-wl8jh" Feb 16 11:29:59 crc kubenswrapper[4949]: I0216 11:29:59.910097 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qh2m8\" (UniqueName: \"kubernetes.io/projected/6c60c38e-2850-474f-85f8-2bd436299ebe-kube-api-access-qh2m8\") pod \"dnsmasq-dns-7ff5475cc9-wl8jh\" (UID: \"6c60c38e-2850-474f-85f8-2bd436299ebe\") " pod="openstack/dnsmasq-dns-7ff5475cc9-wl8jh" Feb 16 11:30:00 crc kubenswrapper[4949]: I0216 11:30:00.046470 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7ff5475cc9-wl8jh" Feb 16 11:30:00 crc kubenswrapper[4949]: I0216 11:30:00.163322 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29520690-hwpp4"] Feb 16 11:30:00 crc kubenswrapper[4949]: I0216 11:30:00.164977 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29520690-hwpp4" Feb 16 11:30:00 crc kubenswrapper[4949]: I0216 11:30:00.167708 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Feb 16 11:30:00 crc kubenswrapper[4949]: I0216 11:30:00.169294 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Feb 16 11:30:00 crc kubenswrapper[4949]: I0216 11:30:00.179061 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29520690-hwpp4"] Feb 16 11:30:00 crc kubenswrapper[4949]: I0216 11:30:00.281739 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wnks8\" (UniqueName: \"kubernetes.io/projected/cb474750-e885-4f04-b26c-fedb8cc342ce-kube-api-access-wnks8\") pod \"collect-profiles-29520690-hwpp4\" (UID: \"cb474750-e885-4f04-b26c-fedb8cc342ce\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520690-hwpp4" Feb 16 11:30:00 crc kubenswrapper[4949]: I0216 11:30:00.281869 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cb474750-e885-4f04-b26c-fedb8cc342ce-config-volume\") pod \"collect-profiles-29520690-hwpp4\" (UID: \"cb474750-e885-4f04-b26c-fedb8cc342ce\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520690-hwpp4" Feb 16 11:30:00 crc kubenswrapper[4949]: I0216 11:30:00.281977 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cb474750-e885-4f04-b26c-fedb8cc342ce-secret-volume\") pod \"collect-profiles-29520690-hwpp4\" (UID: \"cb474750-e885-4f04-b26c-fedb8cc342ce\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520690-hwpp4" Feb 16 11:30:00 crc kubenswrapper[4949]: I0216 11:30:00.383917 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wnks8\" (UniqueName: \"kubernetes.io/projected/cb474750-e885-4f04-b26c-fedb8cc342ce-kube-api-access-wnks8\") pod \"collect-profiles-29520690-hwpp4\" (UID: \"cb474750-e885-4f04-b26c-fedb8cc342ce\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520690-hwpp4" 
Feb 16 11:30:00 crc kubenswrapper[4949]: I0216 11:30:00.384398 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cb474750-e885-4f04-b26c-fedb8cc342ce-config-volume\") pod \"collect-profiles-29520690-hwpp4\" (UID: \"cb474750-e885-4f04-b26c-fedb8cc342ce\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520690-hwpp4" Feb 16 11:30:00 crc kubenswrapper[4949]: I0216 11:30:00.384526 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cb474750-e885-4f04-b26c-fedb8cc342ce-secret-volume\") pod \"collect-profiles-29520690-hwpp4\" (UID: \"cb474750-e885-4f04-b26c-fedb8cc342ce\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520690-hwpp4" Feb 16 11:30:00 crc kubenswrapper[4949]: I0216 11:30:00.385643 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cb474750-e885-4f04-b26c-fedb8cc342ce-config-volume\") pod \"collect-profiles-29520690-hwpp4\" (UID: \"cb474750-e885-4f04-b26c-fedb8cc342ce\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520690-hwpp4" Feb 16 11:30:00 crc kubenswrapper[4949]: I0216 11:30:00.397337 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cb474750-e885-4f04-b26c-fedb8cc342ce-secret-volume\") pod \"collect-profiles-29520690-hwpp4\" (UID: \"cb474750-e885-4f04-b26c-fedb8cc342ce\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520690-hwpp4" Feb 16 11:30:00 crc kubenswrapper[4949]: I0216 11:30:00.416025 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wnks8\" (UniqueName: \"kubernetes.io/projected/cb474750-e885-4f04-b26c-fedb8cc342ce-kube-api-access-wnks8\") pod \"collect-profiles-29520690-hwpp4\" (UID: \"cb474750-e885-4f04-b26c-fedb8cc342ce\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520690-hwpp4" Feb 16 11:30:00 crc kubenswrapper[4949]: I0216 11:30:00.473367 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-77585f5f8c-pmmbt" Feb 16 11:30:00 crc kubenswrapper[4949]: I0216 11:30:00.516870 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29520690-hwpp4" Feb 16 11:30:00 crc kubenswrapper[4949]: I0216 11:30:00.588001 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/52f0a208-5618-4d66-a2c6-abb21558ad01-config\") pod \"52f0a208-5618-4d66-a2c6-abb21558ad01\" (UID: \"52f0a208-5618-4d66-a2c6-abb21558ad01\") " Feb 16 11:30:00 crc kubenswrapper[4949]: I0216 11:30:00.588065 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/52f0a208-5618-4d66-a2c6-abb21558ad01-ovsdbserver-nb\") pod \"52f0a208-5618-4d66-a2c6-abb21558ad01\" (UID: \"52f0a208-5618-4d66-a2c6-abb21558ad01\") " Feb 16 11:30:00 crc kubenswrapper[4949]: I0216 11:30:00.588228 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/52f0a208-5618-4d66-a2c6-abb21558ad01-dns-svc\") pod \"52f0a208-5618-4d66-a2c6-abb21558ad01\" (UID: \"52f0a208-5618-4d66-a2c6-abb21558ad01\") " Feb 16 11:30:00 crc kubenswrapper[4949]: I0216 11:30:00.588278 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/52f0a208-5618-4d66-a2c6-abb21558ad01-ovsdbserver-sb\") pod \"52f0a208-5618-4d66-a2c6-abb21558ad01\" (UID: \"52f0a208-5618-4d66-a2c6-abb21558ad01\") " Feb 16 11:30:00 crc kubenswrapper[4949]: I0216 11:30:00.588360 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/52f0a208-5618-4d66-a2c6-abb21558ad01-dns-swift-storage-0\") pod \"52f0a208-5618-4d66-a2c6-abb21558ad01\" (UID: \"52f0a208-5618-4d66-a2c6-abb21558ad01\") " Feb 16 11:30:00 crc kubenswrapper[4949]: I0216 11:30:00.588407 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cwkz9\" (UniqueName: \"kubernetes.io/projected/52f0a208-5618-4d66-a2c6-abb21558ad01-kube-api-access-cwkz9\") pod \"52f0a208-5618-4d66-a2c6-abb21558ad01\" (UID: \"52f0a208-5618-4d66-a2c6-abb21558ad01\") " Feb 16 11:30:00 crc kubenswrapper[4949]: I0216 11:30:00.593989 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/52f0a208-5618-4d66-a2c6-abb21558ad01-kube-api-access-cwkz9" (OuterVolumeSpecName: "kube-api-access-cwkz9") pod "52f0a208-5618-4d66-a2c6-abb21558ad01" (UID: "52f0a208-5618-4d66-a2c6-abb21558ad01"). InnerVolumeSpecName "kube-api-access-cwkz9". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:30:00 crc kubenswrapper[4949]: I0216 11:30:00.653331 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/52f0a208-5618-4d66-a2c6-abb21558ad01-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "52f0a208-5618-4d66-a2c6-abb21558ad01" (UID: "52f0a208-5618-4d66-a2c6-abb21558ad01"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:30:00 crc kubenswrapper[4949]: I0216 11:30:00.655851 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/52f0a208-5618-4d66-a2c6-abb21558ad01-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "52f0a208-5618-4d66-a2c6-abb21558ad01" (UID: "52f0a208-5618-4d66-a2c6-abb21558ad01"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:30:00 crc kubenswrapper[4949]: I0216 11:30:00.658659 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/52f0a208-5618-4d66-a2c6-abb21558ad01-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "52f0a208-5618-4d66-a2c6-abb21558ad01" (UID: "52f0a208-5618-4d66-a2c6-abb21558ad01"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:30:00 crc kubenswrapper[4949]: I0216 11:30:00.664270 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/52f0a208-5618-4d66-a2c6-abb21558ad01-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "52f0a208-5618-4d66-a2c6-abb21558ad01" (UID: "52f0a208-5618-4d66-a2c6-abb21558ad01"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:30:00 crc kubenswrapper[4949]: I0216 11:30:00.691687 4949 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/52f0a208-5618-4d66-a2c6-abb21558ad01-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Feb 16 11:30:00 crc kubenswrapper[4949]: I0216 11:30:00.691718 4949 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/52f0a208-5618-4d66-a2c6-abb21558ad01-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 16 11:30:00 crc kubenswrapper[4949]: I0216 11:30:00.691730 4949 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/52f0a208-5618-4d66-a2c6-abb21558ad01-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Feb 16 11:30:00 crc kubenswrapper[4949]: I0216 11:30:00.691745 4949 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/52f0a208-5618-4d66-a2c6-abb21558ad01-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Feb 16 11:30:00 crc kubenswrapper[4949]: I0216 11:30:00.691757 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cwkz9\" (UniqueName: \"kubernetes.io/projected/52f0a208-5618-4d66-a2c6-abb21558ad01-kube-api-access-cwkz9\") on node \"crc\" DevicePath \"\"" Feb 16 11:30:00 crc kubenswrapper[4949]: I0216 11:30:00.692966 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/52f0a208-5618-4d66-a2c6-abb21558ad01-config" (OuterVolumeSpecName: "config") pod "52f0a208-5618-4d66-a2c6-abb21558ad01" (UID: "52f0a208-5618-4d66-a2c6-abb21558ad01"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:30:00 crc kubenswrapper[4949]: I0216 11:30:00.780884 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7ff5475cc9-wl8jh"] Feb 16 11:30:00 crc kubenswrapper[4949]: W0216 11:30:00.781480 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6c60c38e_2850_474f_85f8_2bd436299ebe.slice/crio-c74486cd9df4468784f4ea0b598e579b076428dce6441b01adc6e6c064834e2d WatchSource:0}: Error finding container c74486cd9df4468784f4ea0b598e579b076428dce6441b01adc6e6c064834e2d: Status 404 returned error can't find the container with id c74486cd9df4468784f4ea0b598e579b076428dce6441b01adc6e6c064834e2d Feb 16 11:30:00 crc kubenswrapper[4949]: I0216 11:30:00.793517 4949 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/52f0a208-5618-4d66-a2c6-abb21558ad01-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:30:00 crc kubenswrapper[4949]: I0216 11:30:00.898978 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7ff5475cc9-wl8jh" event={"ID":"6c60c38e-2850-474f-85f8-2bd436299ebe","Type":"ContainerStarted","Data":"c74486cd9df4468784f4ea0b598e579b076428dce6441b01adc6e6c064834e2d"} Feb 16 11:30:00 crc kubenswrapper[4949]: I0216 11:30:00.911402 4949 generic.go:334] "Generic (PLEG): container finished" podID="52f0a208-5618-4d66-a2c6-abb21558ad01" containerID="956e60c906d66049e29c17da43fb37acc8e2cfe5c3093ef56e91262705424217" exitCode=0 Feb 16 11:30:00 crc kubenswrapper[4949]: I0216 11:30:00.911474 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77585f5f8c-pmmbt" event={"ID":"52f0a208-5618-4d66-a2c6-abb21558ad01","Type":"ContainerDied","Data":"956e60c906d66049e29c17da43fb37acc8e2cfe5c3093ef56e91262705424217"} Feb 16 11:30:00 crc kubenswrapper[4949]: I0216 11:30:00.911518 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77585f5f8c-pmmbt" event={"ID":"52f0a208-5618-4d66-a2c6-abb21558ad01","Type":"ContainerDied","Data":"3c26299456be5e8fdc80454808f0dc4935fc8e1a017cb4afbaea47e78f21a1d9"} Feb 16 11:30:00 crc kubenswrapper[4949]: I0216 11:30:00.911544 4949 scope.go:117] "RemoveContainer" containerID="956e60c906d66049e29c17da43fb37acc8e2cfe5c3093ef56e91262705424217" Feb 16 11:30:00 crc kubenswrapper[4949]: I0216 11:30:00.911767 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-77585f5f8c-pmmbt" Feb 16 11:30:00 crc kubenswrapper[4949]: I0216 11:30:00.973942 4949 scope.go:117] "RemoveContainer" containerID="74314f219cfdbec6270fe6739719c4ed26c057032c17f30558751abd21010e57" Feb 16 11:30:00 crc kubenswrapper[4949]: I0216 11:30:00.981515 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-77585f5f8c-pmmbt"] Feb 16 11:30:00 crc kubenswrapper[4949]: I0216 11:30:00.993284 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-77585f5f8c-pmmbt"] Feb 16 11:30:01 crc kubenswrapper[4949]: I0216 11:30:01.022668 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-2" Feb 16 11:30:01 crc kubenswrapper[4949]: I0216 11:30:01.083553 4949 scope.go:117] "RemoveContainer" containerID="956e60c906d66049e29c17da43fb37acc8e2cfe5c3093ef56e91262705424217" Feb 16 11:30:01 crc kubenswrapper[4949]: E0216 11:30:01.089408 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"956e60c906d66049e29c17da43fb37acc8e2cfe5c3093ef56e91262705424217\": container with ID starting with 956e60c906d66049e29c17da43fb37acc8e2cfe5c3093ef56e91262705424217 not found: ID does not exist" containerID="956e60c906d66049e29c17da43fb37acc8e2cfe5c3093ef56e91262705424217" Feb 16 11:30:01 crc kubenswrapper[4949]: I0216 11:30:01.089475 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"956e60c906d66049e29c17da43fb37acc8e2cfe5c3093ef56e91262705424217"} err="failed to get container status \"956e60c906d66049e29c17da43fb37acc8e2cfe5c3093ef56e91262705424217\": rpc error: code = NotFound desc = could not find container \"956e60c906d66049e29c17da43fb37acc8e2cfe5c3093ef56e91262705424217\": container with ID starting with 956e60c906d66049e29c17da43fb37acc8e2cfe5c3093ef56e91262705424217 not found: ID does not exist" Feb 16 11:30:01 crc kubenswrapper[4949]: I0216 11:30:01.089511 4949 scope.go:117] "RemoveContainer" containerID="74314f219cfdbec6270fe6739719c4ed26c057032c17f30558751abd21010e57" Feb 16 11:30:01 crc kubenswrapper[4949]: E0216 11:30:01.092828 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"74314f219cfdbec6270fe6739719c4ed26c057032c17f30558751abd21010e57\": container with ID starting with 74314f219cfdbec6270fe6739719c4ed26c057032c17f30558751abd21010e57 not found: ID does not exist" containerID="74314f219cfdbec6270fe6739719c4ed26c057032c17f30558751abd21010e57" Feb 16 11:30:01 crc kubenswrapper[4949]: I0216 11:30:01.092882 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"74314f219cfdbec6270fe6739719c4ed26c057032c17f30558751abd21010e57"} err="failed to get container status \"74314f219cfdbec6270fe6739719c4ed26c057032c17f30558751abd21010e57\": rpc error: code = NotFound desc = could not find container \"74314f219cfdbec6270fe6739719c4ed26c057032c17f30558751abd21010e57\": container with ID starting with 74314f219cfdbec6270fe6739719c4ed26c057032c17f30558751abd21010e57 not found: ID does not exist" Feb 16 11:30:01 crc kubenswrapper[4949]: E0216 11:30:01.103744 4949 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: 
[\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod52f0a208_5618_4d66_a2c6_abb21558ad01.slice/crio-3c26299456be5e8fdc80454808f0dc4935fc8e1a017cb4afbaea47e78f21a1d9\": RecentStats: unable to find data in memory cache]" Feb 16 11:30:01 crc kubenswrapper[4949]: I0216 11:30:01.132185 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29520690-hwpp4"] Feb 16 11:30:01 crc kubenswrapper[4949]: W0216 11:30:01.134364 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcb474750_e885_4f04_b26c_fedb8cc342ce.slice/crio-5a5d1b85fa6f9ff3698b2cca96edf5d1ceffd6a9b1eb98719d6b4827790dec24 WatchSource:0}: Error finding container 5a5d1b85fa6f9ff3698b2cca96edf5d1ceffd6a9b1eb98719d6b4827790dec24: Status 404 returned error can't find the container with id 5a5d1b85fa6f9ff3698b2cca96edf5d1ceffd6a9b1eb98719d6b4827790dec24 Feb 16 11:30:01 crc kubenswrapper[4949]: I0216 11:30:01.271388 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="52f0a208-5618-4d66-a2c6-abb21558ad01" path="/var/lib/kubelet/pods/52f0a208-5618-4d66-a2c6-abb21558ad01/volumes" Feb 16 11:30:01 crc kubenswrapper[4949]: I0216 11:30:01.490428 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-create-xg9xl"] Feb 16 11:30:01 crc kubenswrapper[4949]: E0216 11:30:01.491136 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52f0a208-5618-4d66-a2c6-abb21558ad01" containerName="dnsmasq-dns" Feb 16 11:30:01 crc kubenswrapper[4949]: I0216 11:30:01.491161 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="52f0a208-5618-4d66-a2c6-abb21558ad01" containerName="dnsmasq-dns" Feb 16 11:30:01 crc kubenswrapper[4949]: E0216 11:30:01.501101 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52f0a208-5618-4d66-a2c6-abb21558ad01" containerName="init" Feb 16 11:30:01 crc kubenswrapper[4949]: I0216 11:30:01.501149 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="52f0a208-5618-4d66-a2c6-abb21558ad01" containerName="init" Feb 16 11:30:01 crc kubenswrapper[4949]: I0216 11:30:01.501634 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="52f0a208-5618-4d66-a2c6-abb21558ad01" containerName="dnsmasq-dns" Feb 16 11:30:01 crc kubenswrapper[4949]: I0216 11:30:01.502560 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-create-xg9xl" Feb 16 11:30:01 crc kubenswrapper[4949]: I0216 11:30:01.530663 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-xg9xl"] Feb 16 11:30:01 crc kubenswrapper[4949]: I0216 11:30:01.617100 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d5f53b06-283f-465d-93ba-4366a5e2147c-operator-scripts\") pod \"heat-db-create-xg9xl\" (UID: \"d5f53b06-283f-465d-93ba-4366a5e2147c\") " pod="openstack/heat-db-create-xg9xl" Feb 16 11:30:01 crc kubenswrapper[4949]: I0216 11:30:01.617523 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t2v7s\" (UniqueName: \"kubernetes.io/projected/d5f53b06-283f-465d-93ba-4366a5e2147c-kube-api-access-t2v7s\") pod \"heat-db-create-xg9xl\" (UID: \"d5f53b06-283f-465d-93ba-4366a5e2147c\") " pod="openstack/heat-db-create-xg9xl" Feb 16 11:30:01 crc kubenswrapper[4949]: I0216 11:30:01.679559 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-6n2w7"] Feb 16 11:30:01 crc kubenswrapper[4949]: I0216 11:30:01.681238 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-6n2w7" Feb 16 11:30:01 crc kubenswrapper[4949]: I0216 11:30:01.697386 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-7e44-account-create-update-pzvfn"] Feb 16 11:30:01 crc kubenswrapper[4949]: I0216 11:30:01.699366 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-7e44-account-create-update-pzvfn" Feb 16 11:30:01 crc kubenswrapper[4949]: I0216 11:30:01.703603 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-db-secret" Feb 16 11:30:01 crc kubenswrapper[4949]: I0216 11:30:01.719327 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d5f53b06-283f-465d-93ba-4366a5e2147c-operator-scripts\") pod \"heat-db-create-xg9xl\" (UID: \"d5f53b06-283f-465d-93ba-4366a5e2147c\") " pod="openstack/heat-db-create-xg9xl" Feb 16 11:30:01 crc kubenswrapper[4949]: I0216 11:30:01.719400 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t2v7s\" (UniqueName: \"kubernetes.io/projected/d5f53b06-283f-465d-93ba-4366a5e2147c-kube-api-access-t2v7s\") pod \"heat-db-create-xg9xl\" (UID: \"d5f53b06-283f-465d-93ba-4366a5e2147c\") " pod="openstack/heat-db-create-xg9xl" Feb 16 11:30:01 crc kubenswrapper[4949]: I0216 11:30:01.720382 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d5f53b06-283f-465d-93ba-4366a5e2147c-operator-scripts\") pod \"heat-db-create-xg9xl\" (UID: \"d5f53b06-283f-465d-93ba-4366a5e2147c\") " pod="openstack/heat-db-create-xg9xl" Feb 16 11:30:01 crc kubenswrapper[4949]: I0216 11:30:01.732267 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-7e44-account-create-update-pzvfn"] Feb 16 11:30:01 crc kubenswrapper[4949]: I0216 11:30:01.750344 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-6n2w7"] Feb 16 11:30:01 crc kubenswrapper[4949]: I0216 11:30:01.778965 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t2v7s\" (UniqueName: 
\"kubernetes.io/projected/d5f53b06-283f-465d-93ba-4366a5e2147c-kube-api-access-t2v7s\") pod \"heat-db-create-xg9xl\" (UID: \"d5f53b06-283f-465d-93ba-4366a5e2147c\") " pod="openstack/heat-db-create-xg9xl" Feb 16 11:30:01 crc kubenswrapper[4949]: I0216 11:30:01.821643 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6pqqz\" (UniqueName: \"kubernetes.io/projected/b74df024-1e28-43d0-b364-9cab63fda88b-kube-api-access-6pqqz\") pod \"heat-7e44-account-create-update-pzvfn\" (UID: \"b74df024-1e28-43d0-b364-9cab63fda88b\") " pod="openstack/heat-7e44-account-create-update-pzvfn" Feb 16 11:30:01 crc kubenswrapper[4949]: I0216 11:30:01.821759 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b74df024-1e28-43d0-b364-9cab63fda88b-operator-scripts\") pod \"heat-7e44-account-create-update-pzvfn\" (UID: \"b74df024-1e28-43d0-b364-9cab63fda88b\") " pod="openstack/heat-7e44-account-create-update-pzvfn" Feb 16 11:30:01 crc kubenswrapper[4949]: I0216 11:30:01.821903 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jvtpb\" (UniqueName: \"kubernetes.io/projected/79fc5ddc-25fd-4b68-957d-ebe934bc9388-kube-api-access-jvtpb\") pod \"cinder-db-create-6n2w7\" (UID: \"79fc5ddc-25fd-4b68-957d-ebe934bc9388\") " pod="openstack/cinder-db-create-6n2w7" Feb 16 11:30:01 crc kubenswrapper[4949]: I0216 11:30:01.821964 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/79fc5ddc-25fd-4b68-957d-ebe934bc9388-operator-scripts\") pod \"cinder-db-create-6n2w7\" (UID: \"79fc5ddc-25fd-4b68-957d-ebe934bc9388\") " pod="openstack/cinder-db-create-6n2w7" Feb 16 11:30:01 crc kubenswrapper[4949]: I0216 11:30:01.827021 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-npkkr"] Feb 16 11:30:01 crc kubenswrapper[4949]: I0216 11:30:01.828604 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-npkkr" Feb 16 11:30:01 crc kubenswrapper[4949]: I0216 11:30:01.845942 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-create-xg9xl" Feb 16 11:30:01 crc kubenswrapper[4949]: I0216 11:30:01.863654 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-npkkr"] Feb 16 11:30:01 crc kubenswrapper[4949]: I0216 11:30:01.933845 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b74df024-1e28-43d0-b364-9cab63fda88b-operator-scripts\") pod \"heat-7e44-account-create-update-pzvfn\" (UID: \"b74df024-1e28-43d0-b364-9cab63fda88b\") " pod="openstack/heat-7e44-account-create-update-pzvfn" Feb 16 11:30:01 crc kubenswrapper[4949]: I0216 11:30:01.933983 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jvtpb\" (UniqueName: \"kubernetes.io/projected/79fc5ddc-25fd-4b68-957d-ebe934bc9388-kube-api-access-jvtpb\") pod \"cinder-db-create-6n2w7\" (UID: \"79fc5ddc-25fd-4b68-957d-ebe934bc9388\") " pod="openstack/cinder-db-create-6n2w7" Feb 16 11:30:01 crc kubenswrapper[4949]: I0216 11:30:01.934030 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5mv6n\" (UniqueName: \"kubernetes.io/projected/ada0c1eb-48c6-4d78-b57f-b65979ffbaa4-kube-api-access-5mv6n\") pod \"neutron-db-create-npkkr\" (UID: \"ada0c1eb-48c6-4d78-b57f-b65979ffbaa4\") " pod="openstack/neutron-db-create-npkkr" Feb 16 11:30:01 crc kubenswrapper[4949]: I0216 11:30:01.934054 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/79fc5ddc-25fd-4b68-957d-ebe934bc9388-operator-scripts\") pod \"cinder-db-create-6n2w7\" (UID: \"79fc5ddc-25fd-4b68-957d-ebe934bc9388\") " pod="openstack/cinder-db-create-6n2w7" Feb 16 11:30:01 crc kubenswrapper[4949]: I0216 11:30:01.934081 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ada0c1eb-48c6-4d78-b57f-b65979ffbaa4-operator-scripts\") pod \"neutron-db-create-npkkr\" (UID: \"ada0c1eb-48c6-4d78-b57f-b65979ffbaa4\") " pod="openstack/neutron-db-create-npkkr" Feb 16 11:30:01 crc kubenswrapper[4949]: I0216 11:30:01.934194 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6pqqz\" (UniqueName: \"kubernetes.io/projected/b74df024-1e28-43d0-b364-9cab63fda88b-kube-api-access-6pqqz\") pod \"heat-7e44-account-create-update-pzvfn\" (UID: \"b74df024-1e28-43d0-b364-9cab63fda88b\") " pod="openstack/heat-7e44-account-create-update-pzvfn" Feb 16 11:30:01 crc kubenswrapper[4949]: I0216 11:30:01.934821 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b74df024-1e28-43d0-b364-9cab63fda88b-operator-scripts\") pod \"heat-7e44-account-create-update-pzvfn\" (UID: \"b74df024-1e28-43d0-b364-9cab63fda88b\") " pod="openstack/heat-7e44-account-create-update-pzvfn" Feb 16 11:30:01 crc kubenswrapper[4949]: I0216 11:30:01.935491 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/79fc5ddc-25fd-4b68-957d-ebe934bc9388-operator-scripts\") pod \"cinder-db-create-6n2w7\" (UID: \"79fc5ddc-25fd-4b68-957d-ebe934bc9388\") " pod="openstack/cinder-db-create-6n2w7" Feb 16 11:30:01 crc kubenswrapper[4949]: I0216 11:30:01.964094 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-operator-lifecycle-manager/collect-profiles-29520690-hwpp4" event={"ID":"cb474750-e885-4f04-b26c-fedb8cc342ce","Type":"ContainerStarted","Data":"8662e3fc86f40863e7b490e1aa8d615df2b8a4cdd973bc218a0211a809448ffe"} Feb 16 11:30:01 crc kubenswrapper[4949]: I0216 11:30:01.964285 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29520690-hwpp4" event={"ID":"cb474750-e885-4f04-b26c-fedb8cc342ce","Type":"ContainerStarted","Data":"5a5d1b85fa6f9ff3698b2cca96edf5d1ceffd6a9b1eb98719d6b4827790dec24"} Feb 16 11:30:01 crc kubenswrapper[4949]: I0216 11:30:01.973486 4949 generic.go:334] "Generic (PLEG): container finished" podID="6c60c38e-2850-474f-85f8-2bd436299ebe" containerID="e468ea0ac8a33755ff6f7b898d6e89c147068482f948dcf3da738d409ba619d8" exitCode=0 Feb 16 11:30:01 crc kubenswrapper[4949]: I0216 11:30:01.973845 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7ff5475cc9-wl8jh" event={"ID":"6c60c38e-2850-474f-85f8-2bd436299ebe","Type":"ContainerDied","Data":"e468ea0ac8a33755ff6f7b898d6e89c147068482f948dcf3da738d409ba619d8"} Feb 16 11:30:01 crc kubenswrapper[4949]: I0216 11:30:01.992113 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jvtpb\" (UniqueName: \"kubernetes.io/projected/79fc5ddc-25fd-4b68-957d-ebe934bc9388-kube-api-access-jvtpb\") pod \"cinder-db-create-6n2w7\" (UID: \"79fc5ddc-25fd-4b68-957d-ebe934bc9388\") " pod="openstack/cinder-db-create-6n2w7" Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.010068 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29520690-hwpp4" podStartSLOduration=2.010046365 podStartE2EDuration="2.010046365s" podCreationTimestamp="2026-02-16 11:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:30:02.008616834 +0000 UTC m=+1391.637950999" watchObservedRunningTime="2026-02-16 11:30:02.010046365 +0000 UTC m=+1391.639380520" Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.016161 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-6n2w7" Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.030935 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6pqqz\" (UniqueName: \"kubernetes.io/projected/b74df024-1e28-43d0-b364-9cab63fda88b-kube-api-access-6pqqz\") pod \"heat-7e44-account-create-update-pzvfn\" (UID: \"b74df024-1e28-43d0-b364-9cab63fda88b\") " pod="openstack/heat-7e44-account-create-update-pzvfn" Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.037089 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ada0c1eb-48c6-4d78-b57f-b65979ffbaa4-operator-scripts\") pod \"neutron-db-create-npkkr\" (UID: \"ada0c1eb-48c6-4d78-b57f-b65979ffbaa4\") " pod="openstack/neutron-db-create-npkkr" Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.037734 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5mv6n\" (UniqueName: \"kubernetes.io/projected/ada0c1eb-48c6-4d78-b57f-b65979ffbaa4-kube-api-access-5mv6n\") pod \"neutron-db-create-npkkr\" (UID: \"ada0c1eb-48c6-4d78-b57f-b65979ffbaa4\") " pod="openstack/neutron-db-create-npkkr" Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.040262 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ada0c1eb-48c6-4d78-b57f-b65979ffbaa4-operator-scripts\") pod \"neutron-db-create-npkkr\" (UID: \"ada0c1eb-48c6-4d78-b57f-b65979ffbaa4\") " pod="openstack/neutron-db-create-npkkr" Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.044200 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-7e44-account-create-update-pzvfn" Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.114771 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5mv6n\" (UniqueName: \"kubernetes.io/projected/ada0c1eb-48c6-4d78-b57f-b65979ffbaa4-kube-api-access-5mv6n\") pod \"neutron-db-create-npkkr\" (UID: \"ada0c1eb-48c6-4d78-b57f-b65979ffbaa4\") " pod="openstack/neutron-db-create-npkkr" Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.177944 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-npkkr" Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.275264 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-0db9-account-create-update-tt5pm"] Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.277517 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-0db9-account-create-update-tt5pm" Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.299225 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.321413 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-nv9nl"] Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.323685 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-nv9nl" Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.326867 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.340888 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.341576 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-h4zml" Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.362086 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.364402 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/81ef1e0f-3024-4742-952e-44fe02054f6d-operator-scripts\") pod \"neutron-0db9-account-create-update-tt5pm\" (UID: \"81ef1e0f-3024-4742-952e-44fe02054f6d\") " pod="openstack/neutron-0db9-account-create-update-tt5pm" Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.364577 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hzfql\" (UniqueName: \"kubernetes.io/projected/81ef1e0f-3024-4742-952e-44fe02054f6d-kube-api-access-hzfql\") pod \"neutron-0db9-account-create-update-tt5pm\" (UID: \"81ef1e0f-3024-4742-952e-44fe02054f6d\") " pod="openstack/neutron-0db9-account-create-update-tt5pm" Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.385672 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-0db9-account-create-update-tt5pm"] Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.490252 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-f4ctx"] Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.498269 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-f4ctx"
Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.502211 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/81ef1e0f-3024-4742-952e-44fe02054f6d-operator-scripts\") pod \"neutron-0db9-account-create-update-tt5pm\" (UID: \"81ef1e0f-3024-4742-952e-44fe02054f6d\") " pod="openstack/neutron-0db9-account-create-update-tt5pm"
Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.502309 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/63e31d0f-ee9a-49ae-8699-2915a121ac10-config-data\") pod \"keystone-db-sync-nv9nl\" (UID: \"63e31d0f-ee9a-49ae-8699-2915a121ac10\") " pod="openstack/keystone-db-sync-nv9nl"
Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.502444 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hzfql\" (UniqueName: \"kubernetes.io/projected/81ef1e0f-3024-4742-952e-44fe02054f6d-kube-api-access-hzfql\") pod \"neutron-0db9-account-create-update-tt5pm\" (UID: \"81ef1e0f-3024-4742-952e-44fe02054f6d\") " pod="openstack/neutron-0db9-account-create-update-tt5pm"
Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.502477 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63e31d0f-ee9a-49ae-8699-2915a121ac10-combined-ca-bundle\") pod \"keystone-db-sync-nv9nl\" (UID: \"63e31d0f-ee9a-49ae-8699-2915a121ac10\") " pod="openstack/keystone-db-sync-nv9nl"
Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.502521 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gfjj6\" (UniqueName: \"kubernetes.io/projected/63e31d0f-ee9a-49ae-8699-2915a121ac10-kube-api-access-gfjj6\") pod \"keystone-db-sync-nv9nl\" (UID: \"63e31d0f-ee9a-49ae-8699-2915a121ac10\") " pod="openstack/keystone-db-sync-nv9nl"
Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.503640 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/81ef1e0f-3024-4742-952e-44fe02054f6d-operator-scripts\") pod \"neutron-0db9-account-create-update-tt5pm\" (UID: \"81ef1e0f-3024-4742-952e-44fe02054f6d\") " pod="openstack/neutron-0db9-account-create-update-tt5pm"
Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.559051 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hzfql\" (UniqueName: \"kubernetes.io/projected/81ef1e0f-3024-4742-952e-44fe02054f6d-kube-api-access-hzfql\") pod \"neutron-0db9-account-create-update-tt5pm\" (UID: \"81ef1e0f-3024-4742-952e-44fe02054f6d\") " pod="openstack/neutron-0db9-account-create-update-tt5pm"
Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.565751 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-nv9nl"]
Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.603851 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-f4ctx"]
Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.608571 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63e31d0f-ee9a-49ae-8699-2915a121ac10-combined-ca-bundle\") pod \"keystone-db-sync-nv9nl\" (UID: \"63e31d0f-ee9a-49ae-8699-2915a121ac10\") " pod="openstack/keystone-db-sync-nv9nl"
Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.608628 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/43ddf580-ace8-4f9c-b767-12a5514ac753-operator-scripts\") pod \"barbican-db-create-f4ctx\" (UID: \"43ddf580-ace8-4f9c-b767-12a5514ac753\") " pod="openstack/barbican-db-create-f4ctx"
Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.608672 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gfjj6\" (UniqueName: \"kubernetes.io/projected/63e31d0f-ee9a-49ae-8699-2915a121ac10-kube-api-access-gfjj6\") pod \"keystone-db-sync-nv9nl\" (UID: \"63e31d0f-ee9a-49ae-8699-2915a121ac10\") " pod="openstack/keystone-db-sync-nv9nl"
Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.608764 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w42pr\" (UniqueName: \"kubernetes.io/projected/43ddf580-ace8-4f9c-b767-12a5514ac753-kube-api-access-w42pr\") pod \"barbican-db-create-f4ctx\" (UID: \"43ddf580-ace8-4f9c-b767-12a5514ac753\") " pod="openstack/barbican-db-create-f4ctx"
Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.608864 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/63e31d0f-ee9a-49ae-8699-2915a121ac10-config-data\") pod \"keystone-db-sync-nv9nl\" (UID: \"63e31d0f-ee9a-49ae-8699-2915a121ac10\") " pod="openstack/keystone-db-sync-nv9nl"
Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.633017 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/63e31d0f-ee9a-49ae-8699-2915a121ac10-config-data\") pod \"keystone-db-sync-nv9nl\" (UID: \"63e31d0f-ee9a-49ae-8699-2915a121ac10\") " pod="openstack/keystone-db-sync-nv9nl"
Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.641286 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63e31d0f-ee9a-49ae-8699-2915a121ac10-combined-ca-bundle\") pod \"keystone-db-sync-nv9nl\" (UID: \"63e31d0f-ee9a-49ae-8699-2915a121ac10\") " pod="openstack/keystone-db-sync-nv9nl"
Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.649260 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gfjj6\" (UniqueName: \"kubernetes.io/projected/63e31d0f-ee9a-49ae-8699-2915a121ac10-kube-api-access-gfjj6\") pod \"keystone-db-sync-nv9nl\" (UID: \"63e31d0f-ee9a-49ae-8699-2915a121ac10\") " pod="openstack/keystone-db-sync-nv9nl"
Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.663230 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-0db9-account-create-update-tt5pm"
Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.681990 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-nv9nl"
Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.712775 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/43ddf580-ace8-4f9c-b767-12a5514ac753-operator-scripts\") pod \"barbican-db-create-f4ctx\" (UID: \"43ddf580-ace8-4f9c-b767-12a5514ac753\") " pod="openstack/barbican-db-create-f4ctx"
Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.712866 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w42pr\" (UniqueName: \"kubernetes.io/projected/43ddf580-ace8-4f9c-b767-12a5514ac753-kube-api-access-w42pr\") pod \"barbican-db-create-f4ctx\" (UID: \"43ddf580-ace8-4f9c-b767-12a5514ac753\") " pod="openstack/barbican-db-create-f4ctx"
Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.714231 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/43ddf580-ace8-4f9c-b767-12a5514ac753-operator-scripts\") pod \"barbican-db-create-f4ctx\" (UID: \"43ddf580-ace8-4f9c-b767-12a5514ac753\") " pod="openstack/barbican-db-create-f4ctx"
Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.723164 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-d46b-account-create-update-2f9k8"]
Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.737161 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-d46b-account-create-update-2f9k8"
Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.744339 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret"
Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.772741 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w42pr\" (UniqueName: \"kubernetes.io/projected/43ddf580-ace8-4f9c-b767-12a5514ac753-kube-api-access-w42pr\") pod \"barbican-db-create-f4ctx\" (UID: \"43ddf580-ace8-4f9c-b767-12a5514ac753\") " pod="openstack/barbican-db-create-f4ctx"
Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.780255 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-d46b-account-create-update-2f9k8"]
Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.920641 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tz25q\" (UniqueName: \"kubernetes.io/projected/c899f773-b326-4b03-8fbb-87e97cafd63b-kube-api-access-tz25q\") pod \"cinder-d46b-account-create-update-2f9k8\" (UID: \"c899f773-b326-4b03-8fbb-87e97cafd63b\") " pod="openstack/cinder-d46b-account-create-update-2f9k8"
Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.920774 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c899f773-b326-4b03-8fbb-87e97cafd63b-operator-scripts\") pod \"cinder-d46b-account-create-update-2f9k8\" (UID: \"c899f773-b326-4b03-8fbb-87e97cafd63b\") " pod="openstack/cinder-d46b-account-create-update-2f9k8"
Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.993078 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-7ff2-account-create-update-c6fg9"]
Feb 16 11:30:02 crc kubenswrapper[4949]: I0216 11:30:02.996239 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-7ff2-account-create-update-c6fg9"
Feb 16 11:30:03 crc kubenswrapper[4949]: I0216 11:30:03.005439 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret"
Feb 16 11:30:03 crc kubenswrapper[4949]: I0216 11:30:03.024997 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tz25q\" (UniqueName: \"kubernetes.io/projected/c899f773-b326-4b03-8fbb-87e97cafd63b-kube-api-access-tz25q\") pod \"cinder-d46b-account-create-update-2f9k8\" (UID: \"c899f773-b326-4b03-8fbb-87e97cafd63b\") " pod="openstack/cinder-d46b-account-create-update-2f9k8"
Feb 16 11:30:03 crc kubenswrapper[4949]: I0216 11:30:03.025103 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c899f773-b326-4b03-8fbb-87e97cafd63b-operator-scripts\") pod \"cinder-d46b-account-create-update-2f9k8\" (UID: \"c899f773-b326-4b03-8fbb-87e97cafd63b\") " pod="openstack/cinder-d46b-account-create-update-2f9k8"
Feb 16 11:30:03 crc kubenswrapper[4949]: I0216 11:30:03.026454 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c899f773-b326-4b03-8fbb-87e97cafd63b-operator-scripts\") pod \"cinder-d46b-account-create-update-2f9k8\" (UID: \"c899f773-b326-4b03-8fbb-87e97cafd63b\") " pod="openstack/cinder-d46b-account-create-update-2f9k8"
Feb 16 11:30:03 crc kubenswrapper[4949]: I0216 11:30:03.049347 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-7ff2-account-create-update-c6fg9"]
Feb 16 11:30:03 crc kubenswrapper[4949]: I0216 11:30:03.068055 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tz25q\" (UniqueName: \"kubernetes.io/projected/c899f773-b326-4b03-8fbb-87e97cafd63b-kube-api-access-tz25q\") pod \"cinder-d46b-account-create-update-2f9k8\" (UID: \"c899f773-b326-4b03-8fbb-87e97cafd63b\") " pod="openstack/cinder-d46b-account-create-update-2f9k8"
Feb 16 11:30:03 crc kubenswrapper[4949]: I0216 11:30:03.120132 4949 generic.go:334] "Generic (PLEG): container finished" podID="cb474750-e885-4f04-b26c-fedb8cc342ce" containerID="8662e3fc86f40863e7b490e1aa8d615df2b8a4cdd973bc218a0211a809448ffe" exitCode=0
Feb 16 11:30:03 crc kubenswrapper[4949]: I0216 11:30:03.120225 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29520690-hwpp4" event={"ID":"cb474750-e885-4f04-b26c-fedb8cc342ce","Type":"ContainerDied","Data":"8662e3fc86f40863e7b490e1aa8d615df2b8a4cdd973bc218a0211a809448ffe"}
Feb 16 11:30:03 crc kubenswrapper[4949]: I0216 11:30:03.127627 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r6lf8\" (UniqueName: \"kubernetes.io/projected/c4546872-04ea-4396-acf2-380838b528d0-kube-api-access-r6lf8\") pod \"barbican-7ff2-account-create-update-c6fg9\" (UID: \"c4546872-04ea-4396-acf2-380838b528d0\") " pod="openstack/barbican-7ff2-account-create-update-c6fg9"
Feb 16 11:30:03 crc kubenswrapper[4949]: I0216 11:30:03.127842 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c4546872-04ea-4396-acf2-380838b528d0-operator-scripts\") pod \"barbican-7ff2-account-create-update-c6fg9\" (UID: \"c4546872-04ea-4396-acf2-380838b528d0\") " pod="openstack/barbican-7ff2-account-create-update-c6fg9"
Feb 16 11:30:03 crc kubenswrapper[4949]: I0216 11:30:03.206896 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-f4ctx"
Feb 16 11:30:03 crc kubenswrapper[4949]: I0216 11:30:03.209842 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-d46b-account-create-update-2f9k8"
Feb 16 11:30:03 crc kubenswrapper[4949]: I0216 11:30:03.230228 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r6lf8\" (UniqueName: \"kubernetes.io/projected/c4546872-04ea-4396-acf2-380838b528d0-kube-api-access-r6lf8\") pod \"barbican-7ff2-account-create-update-c6fg9\" (UID: \"c4546872-04ea-4396-acf2-380838b528d0\") " pod="openstack/barbican-7ff2-account-create-update-c6fg9"
Feb 16 11:30:03 crc kubenswrapper[4949]: I0216 11:30:03.231046 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c4546872-04ea-4396-acf2-380838b528d0-operator-scripts\") pod \"barbican-7ff2-account-create-update-c6fg9\" (UID: \"c4546872-04ea-4396-acf2-380838b528d0\") " pod="openstack/barbican-7ff2-account-create-update-c6fg9"
Feb 16 11:30:03 crc kubenswrapper[4949]: I0216 11:30:03.234741 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c4546872-04ea-4396-acf2-380838b528d0-operator-scripts\") pod \"barbican-7ff2-account-create-update-c6fg9\" (UID: \"c4546872-04ea-4396-acf2-380838b528d0\") " pod="openstack/barbican-7ff2-account-create-update-c6fg9"
Feb 16 11:30:03 crc kubenswrapper[4949]: I0216 11:30:03.285016 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r6lf8\" (UniqueName: \"kubernetes.io/projected/c4546872-04ea-4396-acf2-380838b528d0-kube-api-access-r6lf8\") pod \"barbican-7ff2-account-create-update-c6fg9\" (UID: \"c4546872-04ea-4396-acf2-380838b528d0\") " pod="openstack/barbican-7ff2-account-create-update-c6fg9"
Feb 16 11:30:03 crc kubenswrapper[4949]: I0216 11:30:03.285940 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-7ff2-account-create-update-c6fg9"
Feb 16 11:30:03 crc kubenswrapper[4949]: I0216 11:30:03.361086 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-xg9xl"]
Feb 16 11:30:03 crc kubenswrapper[4949]: I0216 11:30:03.818574 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-6n2w7"]
Feb 16 11:30:03 crc kubenswrapper[4949]: I0216 11:30:03.849823 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-7e44-account-create-update-pzvfn"]
Feb 16 11:30:03 crc kubenswrapper[4949]: I0216 11:30:03.872641 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-npkkr"]
Feb 16 11:30:03 crc kubenswrapper[4949]: I0216 11:30:03.959588 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-0db9-account-create-update-tt5pm"]
Feb 16 11:30:04 crc kubenswrapper[4949]: I0216 11:30:04.153040 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-nv9nl"]
Feb 16 11:30:04 crc kubenswrapper[4949]: I0216 11:30:04.189684 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-0db9-account-create-update-tt5pm" event={"ID":"81ef1e0f-3024-4742-952e-44fe02054f6d","Type":"ContainerStarted","Data":"65a061e7476c49e432f1a5786507387c2cc070bed1dc2d3a8ffa7fa2bb2da0dd"}
Feb 16 11:30:04 crc kubenswrapper[4949]: W0216 11:30:04.243666 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod63e31d0f_ee9a_49ae_8699_2915a121ac10.slice/crio-d33a099b8483c6eed4c5e41a1ef512d0e246b7d91f2983ecb21d53d9b14ffca8 WatchSource:0}: Error finding container d33a099b8483c6eed4c5e41a1ef512d0e246b7d91f2983ecb21d53d9b14ffca8: Status 404 returned error can't find the container with id d33a099b8483c6eed4c5e41a1ef512d0e246b7d91f2983ecb21d53d9b14ffca8
Feb 16 11:30:04 crc kubenswrapper[4949]: I0216 11:30:04.245330 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-xg9xl" event={"ID":"d5f53b06-283f-465d-93ba-4366a5e2147c","Type":"ContainerStarted","Data":"365e55270b72d7249a7641b519d059c382ea6330171d9bcdd82172b83f8818c2"}
Feb 16 11:30:04 crc kubenswrapper[4949]: I0216 11:30:04.265756 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-7e44-account-create-update-pzvfn" event={"ID":"b74df024-1e28-43d0-b364-9cab63fda88b","Type":"ContainerStarted","Data":"5ade8fbf90c562f875e653aad4998aae166a39e953cf056258d14434bbcdc075"}
Feb 16 11:30:04 crc kubenswrapper[4949]: I0216 11:30:04.270148 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7ff5475cc9-wl8jh" event={"ID":"6c60c38e-2850-474f-85f8-2bd436299ebe","Type":"ContainerStarted","Data":"c7fc92c9e2443a29efc50b9daa8d763effbd9224e5e5382c017f887381e6a1e2"}
Feb 16 11:30:04 crc kubenswrapper[4949]: I0216 11:30:04.271573 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7ff5475cc9-wl8jh"
Feb 16 11:30:04 crc kubenswrapper[4949]: I0216 11:30:04.274115 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-6n2w7" event={"ID":"79fc5ddc-25fd-4b68-957d-ebe934bc9388","Type":"ContainerStarted","Data":"6b3fe74d7cf6af923bd6712c71704a1660edd60a64c0fcfcf483472d3de9b373"}
Feb 16 11:30:04 crc kubenswrapper[4949]: I0216 11:30:04.301959 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-npkkr" event={"ID":"ada0c1eb-48c6-4d78-b57f-b65979ffbaa4","Type":"ContainerStarted","Data":"43fb5d1b62595ebfd5f3aeb71133328d2dad10fada06feef6873810b753f49be"}
Feb 16 11:30:04 crc kubenswrapper[4949]: I0216 11:30:04.362062 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7ff5475cc9-wl8jh" podStartSLOduration=5.362038725 podStartE2EDuration="5.362038725s" podCreationTimestamp="2026-02-16 11:29:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:30:04.313433318 +0000 UTC m=+1393.942767493" watchObservedRunningTime="2026-02-16 11:30:04.362038725 +0000 UTC m=+1393.991372890"
Feb 16 11:30:04 crc kubenswrapper[4949]: I0216 11:30:04.559899 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Feb 16 11:30:04 crc kubenswrapper[4949]: I0216 11:30:04.573318 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Feb 16 11:30:04 crc kubenswrapper[4949]: I0216 11:30:04.888135 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-f4ctx"]
Feb 16 11:30:05 crc kubenswrapper[4949]: I0216 11:30:05.366847 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-d46b-account-create-update-2f9k8"]
Feb 16 11:30:05 crc kubenswrapper[4949]: W0216 11:30:05.369236 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc4546872_04ea_4396_acf2_380838b528d0.slice/crio-250e2f924023d0be24f8162da7ffbc79cfe643902fd081d044f40e9d21531e49 WatchSource:0}: Error finding container 250e2f924023d0be24f8162da7ffbc79cfe643902fd081d044f40e9d21531e49: Status 404 returned error can't find the container with id 250e2f924023d0be24f8162da7ffbc79cfe643902fd081d044f40e9d21531e49
Feb 16 11:30:05 crc kubenswrapper[4949]: I0216 11:30:05.374580 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-f4ctx" event={"ID":"43ddf580-ace8-4f9c-b767-12a5514ac753","Type":"ContainerStarted","Data":"90954bd8fb49b73b58c7c5dc29aae7651086f291173b24852af3a6cd070620cb"}
Feb 16 11:30:05 crc kubenswrapper[4949]: I0216 11:30:05.378951 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-7ff2-account-create-update-c6fg9"]
Feb 16 11:30:05 crc kubenswrapper[4949]: I0216 11:30:05.385904 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-0db9-account-create-update-tt5pm" event={"ID":"81ef1e0f-3024-4742-952e-44fe02054f6d","Type":"ContainerStarted","Data":"19c6e14200617dc051c50463cc0dc5e299b5f18d971cc76865078b394c97dbb1"}
Feb 16 11:30:05 crc kubenswrapper[4949]: I0216 11:30:05.391549 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-xg9xl" event={"ID":"d5f53b06-283f-465d-93ba-4366a5e2147c","Type":"ContainerStarted","Data":"8abc2f2ddd1dfa9f929835d468021b1acaf6cbfeb080fe16f591723d85b32a41"}
Feb 16 11:30:05 crc kubenswrapper[4949]: I0216 11:30:05.397092 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-7e44-account-create-update-pzvfn" event={"ID":"b74df024-1e28-43d0-b364-9cab63fda88b","Type":"ContainerStarted","Data":"4f151eaf5b83281979d53a553d1543b6885a943977217f39f17da98cb7834023"}
Feb 16 11:30:05 crc kubenswrapper[4949]: I0216 11:30:05.405104 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-nv9nl" event={"ID":"63e31d0f-ee9a-49ae-8699-2915a121ac10","Type":"ContainerStarted","Data":"d33a099b8483c6eed4c5e41a1ef512d0e246b7d91f2983ecb21d53d9b14ffca8"}
Feb 16 11:30:05 crc kubenswrapper[4949]: I0216 11:30:05.416098 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-0db9-account-create-update-tt5pm" podStartSLOduration=3.416077759 podStartE2EDuration="3.416077759s" podCreationTimestamp="2026-02-16 11:30:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:30:05.40033705 +0000 UTC m=+1395.029671235" watchObservedRunningTime="2026-02-16 11:30:05.416077759 +0000 UTC m=+1395.045411924"
Feb 16 11:30:05 crc kubenswrapper[4949]: I0216 11:30:05.417336 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-6n2w7" event={"ID":"79fc5ddc-25fd-4b68-957d-ebe934bc9388","Type":"ContainerStarted","Data":"fcc8559383e59096b9f87048c867e8afa15226d55d9e68cffbfebb89c3962d0e"}
Feb 16 11:30:05 crc kubenswrapper[4949]: I0216 11:30:05.439255 4949 generic.go:334] "Generic (PLEG): container finished" podID="ada0c1eb-48c6-4d78-b57f-b65979ffbaa4" containerID="d53f54521d581e8e56bef68155f4463c0a42f007eb181629044e5f7281a7e206" exitCode=0
Feb 16 11:30:05 crc kubenswrapper[4949]: I0216 11:30:05.439405 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-npkkr" event={"ID":"ada0c1eb-48c6-4d78-b57f-b65979ffbaa4","Type":"ContainerDied","Data":"d53f54521d581e8e56bef68155f4463c0a42f007eb181629044e5f7281a7e206"}
Feb 16 11:30:05 crc kubenswrapper[4949]: I0216 11:30:05.446800 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29520690-hwpp4" event={"ID":"cb474750-e885-4f04-b26c-fedb8cc342ce","Type":"ContainerDied","Data":"5a5d1b85fa6f9ff3698b2cca96edf5d1ceffd6a9b1eb98719d6b4827790dec24"}
Feb 16 11:30:05 crc kubenswrapper[4949]: I0216 11:30:05.446892 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5a5d1b85fa6f9ff3698b2cca96edf5d1ceffd6a9b1eb98719d6b4827790dec24"
Feb 16 11:30:05 crc kubenswrapper[4949]: I0216 11:30:05.465365 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-7e44-account-create-update-pzvfn" podStartSLOduration=4.465343416 podStartE2EDuration="4.465343416s" podCreationTimestamp="2026-02-16 11:30:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:30:05.427090984 +0000 UTC m=+1395.056425139" watchObservedRunningTime="2026-02-16 11:30:05.465343416 +0000 UTC m=+1395.094677581"
Feb 16 11:30:05 crc kubenswrapper[4949]: I0216 11:30:05.488056 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-db-create-xg9xl" podStartSLOduration=4.488029793 podStartE2EDuration="4.488029793s" podCreationTimestamp="2026-02-16 11:30:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:30:05.460987761 +0000 UTC m=+1395.090321936" watchObservedRunningTime="2026-02-16 11:30:05.488029793 +0000 UTC m=+1395.117363958"
Feb 16 11:30:05 crc kubenswrapper[4949]: I0216 11:30:05.502730 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-create-6n2w7" podStartSLOduration=4.502707772 podStartE2EDuration="4.502707772s" podCreationTimestamp="2026-02-16 11:30:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:30:05.480618332 +0000 UTC m=+1395.109952517" watchObservedRunningTime="2026-02-16 11:30:05.502707772 +0000 UTC m=+1395.132041937"
Feb 16 11:30:05 crc kubenswrapper[4949]: I0216 11:30:05.861539 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29520690-hwpp4"
Feb 16 11:30:05 crc kubenswrapper[4949]: I0216 11:30:05.962281 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wnks8\" (UniqueName: \"kubernetes.io/projected/cb474750-e885-4f04-b26c-fedb8cc342ce-kube-api-access-wnks8\") pod \"cb474750-e885-4f04-b26c-fedb8cc342ce\" (UID: \"cb474750-e885-4f04-b26c-fedb8cc342ce\") "
Feb 16 11:30:05 crc kubenswrapper[4949]: I0216 11:30:05.962478 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cb474750-e885-4f04-b26c-fedb8cc342ce-config-volume\") pod \"cb474750-e885-4f04-b26c-fedb8cc342ce\" (UID: \"cb474750-e885-4f04-b26c-fedb8cc342ce\") "
Feb 16 11:30:05 crc kubenswrapper[4949]: I0216 11:30:05.962617 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cb474750-e885-4f04-b26c-fedb8cc342ce-secret-volume\") pod \"cb474750-e885-4f04-b26c-fedb8cc342ce\" (UID: \"cb474750-e885-4f04-b26c-fedb8cc342ce\") "
Feb 16 11:30:05 crc kubenswrapper[4949]: I0216 11:30:05.963547 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cb474750-e885-4f04-b26c-fedb8cc342ce-config-volume" (OuterVolumeSpecName: "config-volume") pod "cb474750-e885-4f04-b26c-fedb8cc342ce" (UID: "cb474750-e885-4f04-b26c-fedb8cc342ce"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Feb 16 11:30:05 crc kubenswrapper[4949]: I0216 11:30:05.969048 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb474750-e885-4f04-b26c-fedb8cc342ce-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "cb474750-e885-4f04-b26c-fedb8cc342ce" (UID: "cb474750-e885-4f04-b26c-fedb8cc342ce"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Feb 16 11:30:05 crc kubenswrapper[4949]: I0216 11:30:05.969271 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb474750-e885-4f04-b26c-fedb8cc342ce-kube-api-access-wnks8" (OuterVolumeSpecName: "kube-api-access-wnks8") pod "cb474750-e885-4f04-b26c-fedb8cc342ce" (UID: "cb474750-e885-4f04-b26c-fedb8cc342ce"). InnerVolumeSpecName "kube-api-access-wnks8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Feb 16 11:30:06 crc kubenswrapper[4949]: I0216 11:30:06.066978 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wnks8\" (UniqueName: \"kubernetes.io/projected/cb474750-e885-4f04-b26c-fedb8cc342ce-kube-api-access-wnks8\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:06 crc kubenswrapper[4949]: I0216 11:30:06.067026 4949 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cb474750-e885-4f04-b26c-fedb8cc342ce-config-volume\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:06 crc kubenswrapper[4949]: I0216 11:30:06.067038 4949 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cb474750-e885-4f04-b26c-fedb8cc342ce-secret-volume\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:06 crc kubenswrapper[4949]: I0216 11:30:06.466811 4949 generic.go:334] "Generic (PLEG): container finished" podID="43ddf580-ace8-4f9c-b767-12a5514ac753" containerID="a845ea160e9fc1fe6041b77fba350f61cef7f868015422628c23c23f4119b2b2" exitCode=0
Feb 16 11:30:06 crc kubenswrapper[4949]: I0216 11:30:06.466877 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-f4ctx" event={"ID":"43ddf580-ace8-4f9c-b767-12a5514ac753","Type":"ContainerDied","Data":"a845ea160e9fc1fe6041b77fba350f61cef7f868015422628c23c23f4119b2b2"}
Feb 16 11:30:06 crc kubenswrapper[4949]: I0216 11:30:06.471865 4949 generic.go:334] "Generic (PLEG): container finished" podID="81ef1e0f-3024-4742-952e-44fe02054f6d" containerID="19c6e14200617dc051c50463cc0dc5e299b5f18d971cc76865078b394c97dbb1" exitCode=0
Feb 16 11:30:06 crc kubenswrapper[4949]: I0216 11:30:06.471951 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-0db9-account-create-update-tt5pm" event={"ID":"81ef1e0f-3024-4742-952e-44fe02054f6d","Type":"ContainerDied","Data":"19c6e14200617dc051c50463cc0dc5e299b5f18d971cc76865078b394c97dbb1"}
Feb 16 11:30:06 crc kubenswrapper[4949]: I0216 11:30:06.474491 4949 generic.go:334] "Generic (PLEG): container finished" podID="c4546872-04ea-4396-acf2-380838b528d0" containerID="2cbfdf823955255b3693e9c694a8753eea5f4d2737563c0051726e6d03631f36" exitCode=0
Feb 16 11:30:06 crc kubenswrapper[4949]: I0216 11:30:06.474563 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-7ff2-account-create-update-c6fg9" event={"ID":"c4546872-04ea-4396-acf2-380838b528d0","Type":"ContainerDied","Data":"2cbfdf823955255b3693e9c694a8753eea5f4d2737563c0051726e6d03631f36"}
Feb 16 11:30:06 crc kubenswrapper[4949]: I0216 11:30:06.474586 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-7ff2-account-create-update-c6fg9" event={"ID":"c4546872-04ea-4396-acf2-380838b528d0","Type":"ContainerStarted","Data":"250e2f924023d0be24f8162da7ffbc79cfe643902fd081d044f40e9d21531e49"}
Feb 16 11:30:06 crc kubenswrapper[4949]: I0216 11:30:06.476408 4949 generic.go:334] "Generic (PLEG): container finished" podID="d5f53b06-283f-465d-93ba-4366a5e2147c" containerID="8abc2f2ddd1dfa9f929835d468021b1acaf6cbfeb080fe16f591723d85b32a41" exitCode=0
Feb 16 11:30:06 crc kubenswrapper[4949]: I0216 11:30:06.476488 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-xg9xl" event={"ID":"d5f53b06-283f-465d-93ba-4366a5e2147c","Type":"ContainerDied","Data":"8abc2f2ddd1dfa9f929835d468021b1acaf6cbfeb080fe16f591723d85b32a41"}
Feb 16 11:30:06 crc kubenswrapper[4949]: I0216 11:30:06.483715 4949 generic.go:334] "Generic (PLEG): container finished" podID="b74df024-1e28-43d0-b364-9cab63fda88b" containerID="4f151eaf5b83281979d53a553d1543b6885a943977217f39f17da98cb7834023" exitCode=0
Feb 16 11:30:06 crc kubenswrapper[4949]: I0216 11:30:06.483819 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-7e44-account-create-update-pzvfn" event={"ID":"b74df024-1e28-43d0-b364-9cab63fda88b","Type":"ContainerDied","Data":"4f151eaf5b83281979d53a553d1543b6885a943977217f39f17da98cb7834023"}
Feb 16 11:30:06 crc kubenswrapper[4949]: I0216 11:30:06.489236 4949 generic.go:334] "Generic (PLEG): container finished" podID="c899f773-b326-4b03-8fbb-87e97cafd63b" containerID="d16675331c2b2e304b9fce68190aa8afa3f402b557759fa009a94792c139b38d" exitCode=0
Feb 16 11:30:06 crc kubenswrapper[4949]: I0216 11:30:06.489374 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-d46b-account-create-update-2f9k8" event={"ID":"c899f773-b326-4b03-8fbb-87e97cafd63b","Type":"ContainerDied","Data":"d16675331c2b2e304b9fce68190aa8afa3f402b557759fa009a94792c139b38d"}
Feb 16 11:30:06 crc kubenswrapper[4949]: I0216 11:30:06.489435 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-d46b-account-create-update-2f9k8" event={"ID":"c899f773-b326-4b03-8fbb-87e97cafd63b","Type":"ContainerStarted","Data":"996c02125d2525d59b63c3b6ddb855acd5ff564628ae957176fa2099f417fe27"}
Feb 16 11:30:06 crc kubenswrapper[4949]: I0216 11:30:06.492211 4949 generic.go:334] "Generic (PLEG): container finished" podID="79fc5ddc-25fd-4b68-957d-ebe934bc9388" containerID="fcc8559383e59096b9f87048c867e8afa15226d55d9e68cffbfebb89c3962d0e" exitCode=0
Feb 16 11:30:06 crc kubenswrapper[4949]: I0216 11:30:06.492283 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-6n2w7" event={"ID":"79fc5ddc-25fd-4b68-957d-ebe934bc9388","Type":"ContainerDied","Data":"fcc8559383e59096b9f87048c867e8afa15226d55d9e68cffbfebb89c3962d0e"}
Feb 16 11:30:06 crc kubenswrapper[4949]: I0216 11:30:06.494062 4949 generic.go:334] "Generic (PLEG): container finished" podID="e8565369-4065-464b-8f76-56b3689744e9" containerID="b1a57639c8bb360cec15ae38aa79cd0d6e8ecbcf1ff88db78f0698efc1b63498" exitCode=0
Feb 16 11:30:06 crc kubenswrapper[4949]: I0216 11:30:06.494289 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"e8565369-4065-464b-8f76-56b3689744e9","Type":"ContainerDied","Data":"b1a57639c8bb360cec15ae38aa79cd0d6e8ecbcf1ff88db78f0698efc1b63498"}
Feb 16 11:30:06 crc kubenswrapper[4949]: I0216 11:30:06.495783 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29520690-hwpp4"
Feb 16 11:30:07 crc kubenswrapper[4949]: I0216 11:30:07.160690 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-npkkr"
Feb 16 11:30:07 crc kubenswrapper[4949]: I0216 11:30:07.205094 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ada0c1eb-48c6-4d78-b57f-b65979ffbaa4-operator-scripts\") pod \"ada0c1eb-48c6-4d78-b57f-b65979ffbaa4\" (UID: \"ada0c1eb-48c6-4d78-b57f-b65979ffbaa4\") "
Feb 16 11:30:07 crc kubenswrapper[4949]: I0216 11:30:07.205581 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5mv6n\" (UniqueName: \"kubernetes.io/projected/ada0c1eb-48c6-4d78-b57f-b65979ffbaa4-kube-api-access-5mv6n\") pod \"ada0c1eb-48c6-4d78-b57f-b65979ffbaa4\" (UID: \"ada0c1eb-48c6-4d78-b57f-b65979ffbaa4\") "
Feb 16 11:30:07 crc kubenswrapper[4949]: I0216 11:30:07.205637 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ada0c1eb-48c6-4d78-b57f-b65979ffbaa4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ada0c1eb-48c6-4d78-b57f-b65979ffbaa4" (UID: "ada0c1eb-48c6-4d78-b57f-b65979ffbaa4"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Feb 16 11:30:07 crc kubenswrapper[4949]: I0216 11:30:07.206246 4949 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ada0c1eb-48c6-4d78-b57f-b65979ffbaa4-operator-scripts\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:07 crc kubenswrapper[4949]: I0216 11:30:07.211737 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ada0c1eb-48c6-4d78-b57f-b65979ffbaa4-kube-api-access-5mv6n" (OuterVolumeSpecName: "kube-api-access-5mv6n") pod "ada0c1eb-48c6-4d78-b57f-b65979ffbaa4" (UID: "ada0c1eb-48c6-4d78-b57f-b65979ffbaa4"). InnerVolumeSpecName "kube-api-access-5mv6n". PluginName "kubernetes.io/projected", VolumeGidValue ""
Feb 16 11:30:07 crc kubenswrapper[4949]: I0216 11:30:07.310350 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5mv6n\" (UniqueName: \"kubernetes.io/projected/ada0c1eb-48c6-4d78-b57f-b65979ffbaa4-kube-api-access-5mv6n\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:07 crc kubenswrapper[4949]: I0216 11:30:07.526570 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"e8565369-4065-464b-8f76-56b3689744e9","Type":"ContainerStarted","Data":"a93a729db328a8b0aaa38515a1eca5bf2d99a79e6ee6a64f8d4505ca457a0c09"}
Feb 16 11:30:07 crc kubenswrapper[4949]: I0216 11:30:07.532547 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-npkkr" event={"ID":"ada0c1eb-48c6-4d78-b57f-b65979ffbaa4","Type":"ContainerDied","Data":"43fb5d1b62595ebfd5f3aeb71133328d2dad10fada06feef6873810b753f49be"}
Feb 16 11:30:07 crc kubenswrapper[4949]: I0216 11:30:07.532625 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="43fb5d1b62595ebfd5f3aeb71133328d2dad10fada06feef6873810b753f49be"
Feb 16 11:30:07 crc kubenswrapper[4949]: I0216 11:30:07.534536 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-npkkr"
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.048469 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7ff5475cc9-wl8jh"
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.122455 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-698758b865-2q49p"]
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.122758 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-698758b865-2q49p" podUID="e03032a6-a757-4270-bd4f-b84532ffcb4b" containerName="dnsmasq-dns" containerID="cri-o://25db854f1d2f4f82868e63729e2a554a66b6f0c26bf15931e5b85827ce96be97" gracePeriod=10
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.565075 4949 generic.go:334] "Generic (PLEG): container finished" podID="e03032a6-a757-4270-bd4f-b84532ffcb4b" containerID="25db854f1d2f4f82868e63729e2a554a66b6f0c26bf15931e5b85827ce96be97" exitCode=0
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.565185 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-2q49p" event={"ID":"e03032a6-a757-4270-bd4f-b84532ffcb4b","Type":"ContainerDied","Data":"25db854f1d2f4f82868e63729e2a554a66b6f0c26bf15931e5b85827ce96be97"}
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.567507 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-6n2w7" event={"ID":"79fc5ddc-25fd-4b68-957d-ebe934bc9388","Type":"ContainerDied","Data":"6b3fe74d7cf6af923bd6712c71704a1660edd60a64c0fcfcf483472d3de9b373"}
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.567551 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6b3fe74d7cf6af923bd6712c71704a1660edd60a64c0fcfcf483472d3de9b373"
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.569245 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-f4ctx" event={"ID":"43ddf580-ace8-4f9c-b767-12a5514ac753","Type":"ContainerDied","Data":"90954bd8fb49b73b58c7c5dc29aae7651086f291173b24852af3a6cd070620cb"}
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.569270 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="90954bd8fb49b73b58c7c5dc29aae7651086f291173b24852af3a6cd070620cb"
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.571743 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-0db9-account-create-update-tt5pm" event={"ID":"81ef1e0f-3024-4742-952e-44fe02054f6d","Type":"ContainerDied","Data":"65a061e7476c49e432f1a5786507387c2cc070bed1dc2d3a8ffa7fa2bb2da0dd"}
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.572002 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="65a061e7476c49e432f1a5786507387c2cc070bed1dc2d3a8ffa7fa2bb2da0dd"
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.573405 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-7ff2-account-create-update-c6fg9" event={"ID":"c4546872-04ea-4396-acf2-380838b528d0","Type":"ContainerDied","Data":"250e2f924023d0be24f8162da7ffbc79cfe643902fd081d044f40e9d21531e49"}
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.573428 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="250e2f924023d0be24f8162da7ffbc79cfe643902fd081d044f40e9d21531e49"
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.575696 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-xg9xl" event={"ID":"d5f53b06-283f-465d-93ba-4366a5e2147c","Type":"ContainerDied","Data":"365e55270b72d7249a7641b519d059c382ea6330171d9bcdd82172b83f8818c2"}
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.575722 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="365e55270b72d7249a7641b519d059c382ea6330171d9bcdd82172b83f8818c2"
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.579528 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-7e44-account-create-update-pzvfn" event={"ID":"b74df024-1e28-43d0-b364-9cab63fda88b","Type":"ContainerDied","Data":"5ade8fbf90c562f875e653aad4998aae166a39e953cf056258d14434bbcdc075"}
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.579571 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5ade8fbf90c562f875e653aad4998aae166a39e953cf056258d14434bbcdc075"
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.584733 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-d46b-account-create-update-2f9k8" event={"ID":"c899f773-b326-4b03-8fbb-87e97cafd63b","Type":"ContainerDied","Data":"996c02125d2525d59b63c3b6ddb855acd5ff564628ae957176fa2099f417fe27"}
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.584778 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="996c02125d2525d59b63c3b6ddb855acd5ff564628ae957176fa2099f417fe27"
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.714269 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-7ff2-account-create-update-c6fg9"
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.735667 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-6n2w7"
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.749733 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-xg9xl"
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.760740 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-0db9-account-create-update-tt5pm"
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.789377 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-f4ctx"
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.789814 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-d46b-account-create-update-2f9k8"
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.799124 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-7e44-account-create-update-pzvfn"
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.838633 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t2v7s\" (UniqueName: \"kubernetes.io/projected/d5f53b06-283f-465d-93ba-4366a5e2147c-kube-api-access-t2v7s\") pod \"d5f53b06-283f-465d-93ba-4366a5e2147c\" (UID: \"d5f53b06-283f-465d-93ba-4366a5e2147c\") "
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.838694 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/43ddf580-ace8-4f9c-b767-12a5514ac753-operator-scripts\") pod \"43ddf580-ace8-4f9c-b767-12a5514ac753\" (UID: \"43ddf580-ace8-4f9c-b767-12a5514ac753\") "
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.838768 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jvtpb\" (UniqueName: \"kubernetes.io/projected/79fc5ddc-25fd-4b68-957d-ebe934bc9388-kube-api-access-jvtpb\") pod \"79fc5ddc-25fd-4b68-957d-ebe934bc9388\" (UID: \"79fc5ddc-25fd-4b68-957d-ebe934bc9388\") "
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.838851 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c899f773-b326-4b03-8fbb-87e97cafd63b-operator-scripts\") pod \"c899f773-b326-4b03-8fbb-87e97cafd63b\" (UID: \"c899f773-b326-4b03-8fbb-87e97cafd63b\") "
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.838937 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b74df024-1e28-43d0-b364-9cab63fda88b-operator-scripts\") pod \"b74df024-1e28-43d0-b364-9cab63fda88b\" (UID: \"b74df024-1e28-43d0-b364-9cab63fda88b\") "
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.838988 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6pqqz\" (UniqueName: \"kubernetes.io/projected/b74df024-1e28-43d0-b364-9cab63fda88b-kube-api-access-6pqqz\") pod \"b74df024-1e28-43d0-b364-9cab63fda88b\" (UID: \"b74df024-1e28-43d0-b364-9cab63fda88b\") "
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.839050 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/79fc5ddc-25fd-4b68-957d-ebe934bc9388-operator-scripts\") pod \"79fc5ddc-25fd-4b68-957d-ebe934bc9388\" (UID: \"79fc5ddc-25fd-4b68-957d-ebe934bc9388\") "
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.839078 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hzfql\" (UniqueName: \"kubernetes.io/projected/81ef1e0f-3024-4742-952e-44fe02054f6d-kube-api-access-hzfql\") pod \"81ef1e0f-3024-4742-952e-44fe02054f6d\" (UID: \"81ef1e0f-3024-4742-952e-44fe02054f6d\") "
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.839132 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d5f53b06-283f-465d-93ba-4366a5e2147c-operator-scripts\") pod \"d5f53b06-283f-465d-93ba-4366a5e2147c\" (UID: \"d5f53b06-283f-465d-93ba-4366a5e2147c\") "
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.839190 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r6lf8\" (UniqueName: \"kubernetes.io/projected/c4546872-04ea-4396-acf2-380838b528d0-kube-api-access-r6lf8\") pod \"c4546872-04ea-4396-acf2-380838b528d0\" (UID: \"c4546872-04ea-4396-acf2-380838b528d0\") "
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.839254 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tz25q\" (UniqueName: \"kubernetes.io/projected/c899f773-b326-4b03-8fbb-87e97cafd63b-kube-api-access-tz25q\") pod \"c899f773-b326-4b03-8fbb-87e97cafd63b\" (UID: \"c899f773-b326-4b03-8fbb-87e97cafd63b\") "
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.839305 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c4546872-04ea-4396-acf2-380838b528d0-operator-scripts\") pod \"c4546872-04ea-4396-acf2-380838b528d0\" (UID: \"c4546872-04ea-4396-acf2-380838b528d0\") "
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.839330 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w42pr\" (UniqueName: \"kubernetes.io/projected/43ddf580-ace8-4f9c-b767-12a5514ac753-kube-api-access-w42pr\") pod \"43ddf580-ace8-4f9c-b767-12a5514ac753\" (UID: \"43ddf580-ace8-4f9c-b767-12a5514ac753\") "
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.839388 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/81ef1e0f-3024-4742-952e-44fe02054f6d-operator-scripts\") pod \"81ef1e0f-3024-4742-952e-44fe02054f6d\" (UID: \"81ef1e0f-3024-4742-952e-44fe02054f6d\") "
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.840579 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/81ef1e0f-3024-4742-952e-44fe02054f6d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "81ef1e0f-3024-4742-952e-44fe02054f6d" (UID: "81ef1e0f-3024-4742-952e-44fe02054f6d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.840594 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/79fc5ddc-25fd-4b68-957d-ebe934bc9388-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "79fc5ddc-25fd-4b68-957d-ebe934bc9388" (UID: "79fc5ddc-25fd-4b68-957d-ebe934bc9388"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.842848 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c4546872-04ea-4396-acf2-380838b528d0-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c4546872-04ea-4396-acf2-380838b528d0" (UID: "c4546872-04ea-4396-acf2-380838b528d0"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.847300 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b74df024-1e28-43d0-b364-9cab63fda88b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b74df024-1e28-43d0-b364-9cab63fda88b" (UID: "b74df024-1e28-43d0-b364-9cab63fda88b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.847462 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c899f773-b326-4b03-8fbb-87e97cafd63b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c899f773-b326-4b03-8fbb-87e97cafd63b" (UID: "c899f773-b326-4b03-8fbb-87e97cafd63b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.847738 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c899f773-b326-4b03-8fbb-87e97cafd63b-kube-api-access-tz25q" (OuterVolumeSpecName: "kube-api-access-tz25q") pod "c899f773-b326-4b03-8fbb-87e97cafd63b" (UID: "c899f773-b326-4b03-8fbb-87e97cafd63b"). InnerVolumeSpecName "kube-api-access-tz25q". PluginName "kubernetes.io/projected", VolumeGidValue ""
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.847933 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d5f53b06-283f-465d-93ba-4366a5e2147c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d5f53b06-283f-465d-93ba-4366a5e2147c" (UID: "d5f53b06-283f-465d-93ba-4366a5e2147c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.849241 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43ddf580-ace8-4f9c-b767-12a5514ac753-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "43ddf580-ace8-4f9c-b767-12a5514ac753" (UID: "43ddf580-ace8-4f9c-b767-12a5514ac753"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.849298 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d5f53b06-283f-465d-93ba-4366a5e2147c-kube-api-access-t2v7s" (OuterVolumeSpecName: "kube-api-access-t2v7s") pod "d5f53b06-283f-465d-93ba-4366a5e2147c" (UID: "d5f53b06-283f-465d-93ba-4366a5e2147c"). InnerVolumeSpecName "kube-api-access-t2v7s". PluginName "kubernetes.io/projected", VolumeGidValue ""
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.851080 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c4546872-04ea-4396-acf2-380838b528d0-kube-api-access-r6lf8" (OuterVolumeSpecName: "kube-api-access-r6lf8") pod "c4546872-04ea-4396-acf2-380838b528d0" (UID: "c4546872-04ea-4396-acf2-380838b528d0"). InnerVolumeSpecName "kube-api-access-r6lf8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.852958 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/79fc5ddc-25fd-4b68-957d-ebe934bc9388-kube-api-access-jvtpb" (OuterVolumeSpecName: "kube-api-access-jvtpb") pod "79fc5ddc-25fd-4b68-957d-ebe934bc9388" (UID: "79fc5ddc-25fd-4b68-957d-ebe934bc9388"). InnerVolumeSpecName "kube-api-access-jvtpb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.854936 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b74df024-1e28-43d0-b364-9cab63fda88b-kube-api-access-6pqqz" (OuterVolumeSpecName: "kube-api-access-6pqqz") pod "b74df024-1e28-43d0-b364-9cab63fda88b" (UID: "b74df024-1e28-43d0-b364-9cab63fda88b"). InnerVolumeSpecName "kube-api-access-6pqqz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.855266 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43ddf580-ace8-4f9c-b767-12a5514ac753-kube-api-access-w42pr" (OuterVolumeSpecName: "kube-api-access-w42pr") pod "43ddf580-ace8-4f9c-b767-12a5514ac753" (UID: "43ddf580-ace8-4f9c-b767-12a5514ac753"). InnerVolumeSpecName "kube-api-access-w42pr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.860825 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/81ef1e0f-3024-4742-952e-44fe02054f6d-kube-api-access-hzfql" (OuterVolumeSpecName: "kube-api-access-hzfql") pod "81ef1e0f-3024-4742-952e-44fe02054f6d" (UID: "81ef1e0f-3024-4742-952e-44fe02054f6d"). InnerVolumeSpecName "kube-api-access-hzfql". PluginName "kubernetes.io/projected", VolumeGidValue ""
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.941541 4949 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/79fc5ddc-25fd-4b68-957d-ebe934bc9388-operator-scripts\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.941577 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hzfql\" (UniqueName: \"kubernetes.io/projected/81ef1e0f-3024-4742-952e-44fe02054f6d-kube-api-access-hzfql\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.941595 4949 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d5f53b06-283f-465d-93ba-4366a5e2147c-operator-scripts\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.941606 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r6lf8\" (UniqueName: \"kubernetes.io/projected/c4546872-04ea-4396-acf2-380838b528d0-kube-api-access-r6lf8\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.941615 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tz25q\" (UniqueName: \"kubernetes.io/projected/c899f773-b326-4b03-8fbb-87e97cafd63b-kube-api-access-tz25q\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.941625 4949 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c4546872-04ea-4396-acf2-380838b528d0-operator-scripts\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.941633 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w42pr\" (UniqueName: \"kubernetes.io/projected/43ddf580-ace8-4f9c-b767-12a5514ac753-kube-api-access-w42pr\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.941643 4949 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/81ef1e0f-3024-4742-952e-44fe02054f6d-operator-scripts\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.941651 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t2v7s\" (UniqueName: \"kubernetes.io/projected/d5f53b06-283f-465d-93ba-4366a5e2147c-kube-api-access-t2v7s\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.941660 4949 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/43ddf580-ace8-4f9c-b767-12a5514ac753-operator-scripts\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.941668 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jvtpb\" (UniqueName: \"kubernetes.io/projected/79fc5ddc-25fd-4b68-957d-ebe934bc9388-kube-api-access-jvtpb\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.941676 4949 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c899f773-b326-4b03-8fbb-87e97cafd63b-operator-scripts\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.941685 4949 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b74df024-1e28-43d0-b364-9cab63fda88b-operator-scripts\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:10 crc kubenswrapper[4949]: I0216 11:30:10.941694 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6pqqz\" (UniqueName: \"kubernetes.io/projected/b74df024-1e28-43d0-b364-9cab63fda88b-kube-api-access-6pqqz\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:11 crc kubenswrapper[4949]: I0216 11:30:11.037239 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-2q49p"
Feb 16 11:30:11 crc kubenswrapper[4949]: I0216 11:30:11.147692 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e03032a6-a757-4270-bd4f-b84532ffcb4b-dns-svc\") pod \"e03032a6-a757-4270-bd4f-b84532ffcb4b\" (UID: \"e03032a6-a757-4270-bd4f-b84532ffcb4b\") "
Feb 16 11:30:11 crc kubenswrapper[4949]: I0216 11:30:11.147982 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e03032a6-a757-4270-bd4f-b84532ffcb4b-ovsdbserver-sb\") pod \"e03032a6-a757-4270-bd4f-b84532ffcb4b\" (UID: \"e03032a6-a757-4270-bd4f-b84532ffcb4b\") "
Feb 16 11:30:11 crc kubenswrapper[4949]: I0216 11:30:11.148053 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e03032a6-a757-4270-bd4f-b84532ffcb4b-ovsdbserver-nb\") pod \"e03032a6-a757-4270-bd4f-b84532ffcb4b\" (UID: \"e03032a6-a757-4270-bd4f-b84532ffcb4b\") "
Feb 16 11:30:11 crc kubenswrapper[4949]: I0216 11:30:11.148285 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e03032a6-a757-4270-bd4f-b84532ffcb4b-config\") pod \"e03032a6-a757-4270-bd4f-b84532ffcb4b\" (UID: \"e03032a6-a757-4270-bd4f-b84532ffcb4b\") "
Feb 16 11:30:11 crc kubenswrapper[4949]: I0216 11:30:11.148329 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x5xnm\" (UniqueName: \"kubernetes.io/projected/e03032a6-a757-4270-bd4f-b84532ffcb4b-kube-api-access-x5xnm\") pod \"e03032a6-a757-4270-bd4f-b84532ffcb4b\" (UID: \"e03032a6-a757-4270-bd4f-b84532ffcb4b\") "
Feb 16 11:30:11 crc kubenswrapper[4949]: I0216 11:30:11.155279 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e03032a6-a757-4270-bd4f-b84532ffcb4b-kube-api-access-x5xnm" (OuterVolumeSpecName: "kube-api-access-x5xnm") pod "e03032a6-a757-4270-bd4f-b84532ffcb4b" (UID: "e03032a6-a757-4270-bd4f-b84532ffcb4b"). InnerVolumeSpecName "kube-api-access-x5xnm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Feb 16 11:30:11 crc kubenswrapper[4949]: I0216 11:30:11.218116 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e03032a6-a757-4270-bd4f-b84532ffcb4b-config" (OuterVolumeSpecName: "config") pod "e03032a6-a757-4270-bd4f-b84532ffcb4b" (UID: "e03032a6-a757-4270-bd4f-b84532ffcb4b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Feb 16 11:30:11 crc kubenswrapper[4949]: I0216 11:30:11.224185 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e03032a6-a757-4270-bd4f-b84532ffcb4b-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e03032a6-a757-4270-bd4f-b84532ffcb4b" (UID: "e03032a6-a757-4270-bd4f-b84532ffcb4b"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Feb 16 11:30:11 crc kubenswrapper[4949]: I0216 11:30:11.227966 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e03032a6-a757-4270-bd4f-b84532ffcb4b-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "e03032a6-a757-4270-bd4f-b84532ffcb4b" (UID: "e03032a6-a757-4270-bd4f-b84532ffcb4b"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Feb 16 11:30:11 crc kubenswrapper[4949]: I0216 11:30:11.234406 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e03032a6-a757-4270-bd4f-b84532ffcb4b-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e03032a6-a757-4270-bd4f-b84532ffcb4b" (UID: "e03032a6-a757-4270-bd4f-b84532ffcb4b"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Feb 16 11:30:11 crc kubenswrapper[4949]: I0216 11:30:11.262913 4949 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e03032a6-a757-4270-bd4f-b84532ffcb4b-config\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:11 crc kubenswrapper[4949]: I0216 11:30:11.265570 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x5xnm\" (UniqueName: \"kubernetes.io/projected/e03032a6-a757-4270-bd4f-b84532ffcb4b-kube-api-access-x5xnm\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:11 crc kubenswrapper[4949]: I0216 11:30:11.266877 4949 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e03032a6-a757-4270-bd4f-b84532ffcb4b-dns-svc\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:11 crc kubenswrapper[4949]: I0216 11:30:11.266925 4949 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e03032a6-a757-4270-bd4f-b84532ffcb4b-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:11 crc kubenswrapper[4949]: I0216 11:30:11.266940 4949 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e03032a6-a757-4270-bd4f-b84532ffcb4b-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:11 crc kubenswrapper[4949]: E0216 11:30:11.493787 4949 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod79fc5ddc_25fd_4b68_957d_ebe934bc9388.slice/crio-6b3fe74d7cf6af923bd6712c71704a1660edd60a64c0fcfcf483472d3de9b373\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc4546872_04ea_4396_acf2_380838b528d0.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod43ddf580_ace8_4f9c_b767_12a5514ac753.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod43ddf580_ace8_4f9c_b767_12a5514ac753.slice/crio-90954bd8fb49b73b58c7c5dc29aae7651086f291173b24852af3a6cd070620cb\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb74df024_1e28_43d0_b364_9cab63fda88b.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode03032a6_a757_4270_bd4f_b84532ffcb4b.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb74df024_1e28_43d0_b364_9cab63fda88b.slice/crio-5ade8fbf90c562f875e653aad4998aae166a39e953cf056258d14434bbcdc075\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd5f53b06_283f_465d_93ba_4366a5e2147c.slice/crio-365e55270b72d7249a7641b519d059c382ea6330171d9bcdd82172b83f8818c2\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc899f773_b326_4b03_8fbb_87e97cafd63b.slice/crio-996c02125d2525d59b63c3b6ddb855acd5ff564628ae957176fa2099f417fe27\": RecentStats: unable to find data in memory cache]"
Feb 16 11:30:11 crc kubenswrapper[4949]: I0216 11:30:11.597298 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-nv9nl" event={"ID":"63e31d0f-ee9a-49ae-8699-2915a121ac10","Type":"ContainerStarted","Data":"64fac376aa3bc1cdb5c65b3d6ff8d56eaf9880de805569a9547dc722adec71b9"}
Feb 16 11:30:11 crc kubenswrapper[4949]: I0216 11:30:11.600379 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-2q49p" event={"ID":"e03032a6-a757-4270-bd4f-b84532ffcb4b","Type":"ContainerDied","Data":"f271253b6b3cbd166a0bb136ee89ef27a7ab04515cbab3a4dd0f8de6dedfe9df"}
Feb 16 11:30:11 crc kubenswrapper[4949]: I0216 11:30:11.600439 4949 scope.go:117] "RemoveContainer" containerID="25db854f1d2f4f82868e63729e2a554a66b6f0c26bf15931e5b85827ce96be97"
Feb 16 11:30:11 crc kubenswrapper[4949]: I0216 11:30:11.600611 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-2q49p"
Feb 16 11:30:11 crc kubenswrapper[4949]: I0216 11:30:11.606491 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"e8565369-4065-464b-8f76-56b3689744e9","Type":"ContainerStarted","Data":"2f649e1ea4f08fd7b6d3b7c57b46ea074b656d721390b3b31a553fe962a938c0"}
Feb 16 11:30:11 crc kubenswrapper[4949]: I0216 11:30:11.606543 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-d46b-account-create-update-2f9k8"
Feb 16 11:30:11 crc kubenswrapper[4949]: I0216 11:30:11.606604 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-xg9xl"
Feb 16 11:30:11 crc kubenswrapper[4949]: I0216 11:30:11.606546 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"e8565369-4065-464b-8f76-56b3689744e9","Type":"ContainerStarted","Data":"c86de5959b1c44248465e4d680f6e3528ffcfe9e730438937410676cfa1f71c4"}
Feb 16 11:30:11 crc kubenswrapper[4949]: I0216 11:30:11.606738 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-6n2w7"
Feb 16 11:30:11 crc kubenswrapper[4949]: I0216 11:30:11.606543 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-f4ctx"
Feb 16 11:30:11 crc kubenswrapper[4949]: I0216 11:30:11.606804 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-7e44-account-create-update-pzvfn"
Feb 16 11:30:11 crc kubenswrapper[4949]: I0216 11:30:11.606493 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-0db9-account-create-update-tt5pm"
Feb 16 11:30:11 crc kubenswrapper[4949]: I0216 11:30:11.606849 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-7ff2-account-create-update-c6fg9" Feb 16 11:30:11 crc kubenswrapper[4949]: I0216 11:30:11.628576 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-nv9nl" podStartSLOduration=2.93543848 podStartE2EDuration="9.628554204s" podCreationTimestamp="2026-02-16 11:30:02 +0000 UTC" firstStartedPulling="2026-02-16 11:30:04.277763279 +0000 UTC m=+1393.907097454" lastFinishedPulling="2026-02-16 11:30:10.970879023 +0000 UTC m=+1400.600213178" observedRunningTime="2026-02-16 11:30:11.619405263 +0000 UTC m=+1401.248739428" watchObservedRunningTime="2026-02-16 11:30:11.628554204 +0000 UTC m=+1401.257888359" Feb 16 11:30:11 crc kubenswrapper[4949]: I0216 11:30:11.655527 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=18.655496323 podStartE2EDuration="18.655496323s" podCreationTimestamp="2026-02-16 11:29:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:30:11.645992422 +0000 UTC m=+1401.275326637" watchObservedRunningTime="2026-02-16 11:30:11.655496323 +0000 UTC m=+1401.284830498" Feb 16 11:30:11 crc kubenswrapper[4949]: I0216 11:30:11.655796 4949 scope.go:117] "RemoveContainer" containerID="673c05b826e22c4a9c97f9ffe8b8188ffc64e0296990a49e1581aa341b58fd4b" Feb 16 11:30:11 crc kubenswrapper[4949]: I0216 11:30:11.681878 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-698758b865-2q49p"] Feb 16 11:30:11 crc kubenswrapper[4949]: I0216 11:30:11.695420 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-698758b865-2q49p"] Feb 16 11:30:13 crc kubenswrapper[4949]: I0216 11:30:13.247990 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e03032a6-a757-4270-bd4f-b84532ffcb4b" path="/var/lib/kubelet/pods/e03032a6-a757-4270-bd4f-b84532ffcb4b/volumes" Feb 16 11:30:14 crc kubenswrapper[4949]: I0216 11:30:14.191650 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Feb 16 11:30:14 crc kubenswrapper[4949]: I0216 11:30:14.641892 4949 generic.go:334] "Generic (PLEG): container finished" podID="63e31d0f-ee9a-49ae-8699-2915a121ac10" containerID="64fac376aa3bc1cdb5c65b3d6ff8d56eaf9880de805569a9547dc722adec71b9" exitCode=0 Feb 16 11:30:14 crc kubenswrapper[4949]: I0216 11:30:14.641979 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-nv9nl" event={"ID":"63e31d0f-ee9a-49ae-8699-2915a121ac10","Type":"ContainerDied","Data":"64fac376aa3bc1cdb5c65b3d6ff8d56eaf9880de805569a9547dc722adec71b9"} Feb 16 11:30:16 crc kubenswrapper[4949]: I0216 11:30:16.075060 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-nv9nl" Feb 16 11:30:16 crc kubenswrapper[4949]: I0216 11:30:16.095984 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/63e31d0f-ee9a-49ae-8699-2915a121ac10-config-data\") pod \"63e31d0f-ee9a-49ae-8699-2915a121ac10\" (UID: \"63e31d0f-ee9a-49ae-8699-2915a121ac10\") " Feb 16 11:30:16 crc kubenswrapper[4949]: I0216 11:30:16.096271 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63e31d0f-ee9a-49ae-8699-2915a121ac10-combined-ca-bundle\") pod \"63e31d0f-ee9a-49ae-8699-2915a121ac10\" (UID: \"63e31d0f-ee9a-49ae-8699-2915a121ac10\") " Feb 16 11:30:16 crc kubenswrapper[4949]: I0216 11:30:16.096399 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gfjj6\" (UniqueName: \"kubernetes.io/projected/63e31d0f-ee9a-49ae-8699-2915a121ac10-kube-api-access-gfjj6\") pod \"63e31d0f-ee9a-49ae-8699-2915a121ac10\" (UID: \"63e31d0f-ee9a-49ae-8699-2915a121ac10\") " Feb 16 11:30:16 crc kubenswrapper[4949]: I0216 11:30:16.104351 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/63e31d0f-ee9a-49ae-8699-2915a121ac10-kube-api-access-gfjj6" (OuterVolumeSpecName: "kube-api-access-gfjj6") pod "63e31d0f-ee9a-49ae-8699-2915a121ac10" (UID: "63e31d0f-ee9a-49ae-8699-2915a121ac10"). InnerVolumeSpecName "kube-api-access-gfjj6". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:30:16 crc kubenswrapper[4949]: I0216 11:30:16.131428 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/63e31d0f-ee9a-49ae-8699-2915a121ac10-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "63e31d0f-ee9a-49ae-8699-2915a121ac10" (UID: "63e31d0f-ee9a-49ae-8699-2915a121ac10"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:30:16 crc kubenswrapper[4949]: I0216 11:30:16.157234 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/63e31d0f-ee9a-49ae-8699-2915a121ac10-config-data" (OuterVolumeSpecName: "config-data") pod "63e31d0f-ee9a-49ae-8699-2915a121ac10" (UID: "63e31d0f-ee9a-49ae-8699-2915a121ac10"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:30:16 crc kubenswrapper[4949]: I0216 11:30:16.199550 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gfjj6\" (UniqueName: \"kubernetes.io/projected/63e31d0f-ee9a-49ae-8699-2915a121ac10-kube-api-access-gfjj6\") on node \"crc\" DevicePath \"\"" Feb 16 11:30:16 crc kubenswrapper[4949]: I0216 11:30:16.199582 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/63e31d0f-ee9a-49ae-8699-2915a121ac10-config-data\") on node \"crc\" DevicePath \"\"" Feb 16 11:30:16 crc kubenswrapper[4949]: I0216 11:30:16.199592 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63e31d0f-ee9a-49ae-8699-2915a121ac10-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:30:16 crc kubenswrapper[4949]: I0216 11:30:16.663969 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-nv9nl" event={"ID":"63e31d0f-ee9a-49ae-8699-2915a121ac10","Type":"ContainerDied","Data":"d33a099b8483c6eed4c5e41a1ef512d0e246b7d91f2983ecb21d53d9b14ffca8"} Feb 16 11:30:16 crc kubenswrapper[4949]: I0216 11:30:16.664014 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d33a099b8483c6eed4c5e41a1ef512d0e246b7d91f2983ecb21d53d9b14ffca8" Feb 16 11:30:16 crc kubenswrapper[4949]: I0216 11:30:16.664446 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-nv9nl" Feb 16 11:30:16 crc kubenswrapper[4949]: I0216 11:30:16.983321 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5c5cc7c5ff-5j5hq"] Feb 16 11:30:16 crc kubenswrapper[4949]: E0216 11:30:16.983936 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ada0c1eb-48c6-4d78-b57f-b65979ffbaa4" containerName="mariadb-database-create" Feb 16 11:30:16 crc kubenswrapper[4949]: I0216 11:30:16.983958 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="ada0c1eb-48c6-4d78-b57f-b65979ffbaa4" containerName="mariadb-database-create" Feb 16 11:30:16 crc kubenswrapper[4949]: E0216 11:30:16.983972 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4546872-04ea-4396-acf2-380838b528d0" containerName="mariadb-account-create-update" Feb 16 11:30:16 crc kubenswrapper[4949]: I0216 11:30:16.983978 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4546872-04ea-4396-acf2-380838b528d0" containerName="mariadb-account-create-update" Feb 16 11:30:16 crc kubenswrapper[4949]: E0216 11:30:16.983991 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c899f773-b326-4b03-8fbb-87e97cafd63b" containerName="mariadb-account-create-update" Feb 16 11:30:16 crc kubenswrapper[4949]: I0216 11:30:16.983998 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="c899f773-b326-4b03-8fbb-87e97cafd63b" containerName="mariadb-account-create-update" Feb 16 11:30:16 crc kubenswrapper[4949]: E0216 11:30:16.984013 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5f53b06-283f-465d-93ba-4366a5e2147c" containerName="mariadb-database-create" Feb 16 11:30:16 crc kubenswrapper[4949]: I0216 11:30:16.984019 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5f53b06-283f-465d-93ba-4366a5e2147c" containerName="mariadb-database-create" Feb 16 11:30:16 crc kubenswrapper[4949]: E0216 11:30:16.984034 4949 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="e03032a6-a757-4270-bd4f-b84532ffcb4b" containerName="init" Feb 16 11:30:16 crc kubenswrapper[4949]: I0216 11:30:16.984040 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="e03032a6-a757-4270-bd4f-b84532ffcb4b" containerName="init" Feb 16 11:30:16 crc kubenswrapper[4949]: E0216 11:30:16.984051 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79fc5ddc-25fd-4b68-957d-ebe934bc9388" containerName="mariadb-database-create" Feb 16 11:30:16 crc kubenswrapper[4949]: I0216 11:30:16.984058 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="79fc5ddc-25fd-4b68-957d-ebe934bc9388" containerName="mariadb-database-create" Feb 16 11:30:16 crc kubenswrapper[4949]: E0216 11:30:16.984067 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81ef1e0f-3024-4742-952e-44fe02054f6d" containerName="mariadb-account-create-update" Feb 16 11:30:16 crc kubenswrapper[4949]: I0216 11:30:16.984072 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="81ef1e0f-3024-4742-952e-44fe02054f6d" containerName="mariadb-account-create-update" Feb 16 11:30:16 crc kubenswrapper[4949]: E0216 11:30:16.984083 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43ddf580-ace8-4f9c-b767-12a5514ac753" containerName="mariadb-database-create" Feb 16 11:30:16 crc kubenswrapper[4949]: I0216 11:30:16.984089 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="43ddf580-ace8-4f9c-b767-12a5514ac753" containerName="mariadb-database-create" Feb 16 11:30:16 crc kubenswrapper[4949]: E0216 11:30:16.984100 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63e31d0f-ee9a-49ae-8699-2915a121ac10" containerName="keystone-db-sync" Feb 16 11:30:16 crc kubenswrapper[4949]: I0216 11:30:16.984106 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="63e31d0f-ee9a-49ae-8699-2915a121ac10" containerName="keystone-db-sync" Feb 16 11:30:16 crc kubenswrapper[4949]: E0216 11:30:16.984121 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e03032a6-a757-4270-bd4f-b84532ffcb4b" containerName="dnsmasq-dns" Feb 16 11:30:16 crc kubenswrapper[4949]: I0216 11:30:16.984127 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="e03032a6-a757-4270-bd4f-b84532ffcb4b" containerName="dnsmasq-dns" Feb 16 11:30:16 crc kubenswrapper[4949]: E0216 11:30:16.984137 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b74df024-1e28-43d0-b364-9cab63fda88b" containerName="mariadb-account-create-update" Feb 16 11:30:16 crc kubenswrapper[4949]: I0216 11:30:16.984145 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="b74df024-1e28-43d0-b364-9cab63fda88b" containerName="mariadb-account-create-update" Feb 16 11:30:16 crc kubenswrapper[4949]: E0216 11:30:16.984154 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb474750-e885-4f04-b26c-fedb8cc342ce" containerName="collect-profiles" Feb 16 11:30:16 crc kubenswrapper[4949]: I0216 11:30:16.984160 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb474750-e885-4f04-b26c-fedb8cc342ce" containerName="collect-profiles" Feb 16 11:30:16 crc kubenswrapper[4949]: I0216 11:30:16.984437 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="63e31d0f-ee9a-49ae-8699-2915a121ac10" containerName="keystone-db-sync" Feb 16 11:30:16 crc kubenswrapper[4949]: I0216 11:30:16.984451 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="c899f773-b326-4b03-8fbb-87e97cafd63b" containerName="mariadb-account-create-update" Feb 16 11:30:16 crc 
kubenswrapper[4949]: I0216 11:30:16.984464 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="ada0c1eb-48c6-4d78-b57f-b65979ffbaa4" containerName="mariadb-database-create" Feb 16 11:30:16 crc kubenswrapper[4949]: I0216 11:30:16.984475 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb474750-e885-4f04-b26c-fedb8cc342ce" containerName="collect-profiles" Feb 16 11:30:16 crc kubenswrapper[4949]: I0216 11:30:16.984486 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="43ddf580-ace8-4f9c-b767-12a5514ac753" containerName="mariadb-database-create" Feb 16 11:30:16 crc kubenswrapper[4949]: I0216 11:30:16.984495 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="e03032a6-a757-4270-bd4f-b84532ffcb4b" containerName="dnsmasq-dns" Feb 16 11:30:16 crc kubenswrapper[4949]: I0216 11:30:16.984502 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="c4546872-04ea-4396-acf2-380838b528d0" containerName="mariadb-account-create-update" Feb 16 11:30:16 crc kubenswrapper[4949]: I0216 11:30:16.984515 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="b74df024-1e28-43d0-b364-9cab63fda88b" containerName="mariadb-account-create-update" Feb 16 11:30:16 crc kubenswrapper[4949]: I0216 11:30:16.984526 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="81ef1e0f-3024-4742-952e-44fe02054f6d" containerName="mariadb-account-create-update" Feb 16 11:30:16 crc kubenswrapper[4949]: I0216 11:30:16.984538 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="79fc5ddc-25fd-4b68-957d-ebe934bc9388" containerName="mariadb-database-create" Feb 16 11:30:16 crc kubenswrapper[4949]: I0216 11:30:16.984547 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5f53b06-283f-465d-93ba-4366a5e2147c" containerName="mariadb-database-create" Feb 16 11:30:16 crc kubenswrapper[4949]: I0216 11:30:16.985826 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c5cc7c5ff-5j5hq" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.018593 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c5cc7c5ff-5j5hq"] Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.033624 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/af43164c-211d-477d-9e70-21f118ba045d-ovsdbserver-nb\") pod \"dnsmasq-dns-5c5cc7c5ff-5j5hq\" (UID: \"af43164c-211d-477d-9e70-21f118ba045d\") " pod="openstack/dnsmasq-dns-5c5cc7c5ff-5j5hq" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.033716 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/af43164c-211d-477d-9e70-21f118ba045d-ovsdbserver-sb\") pod \"dnsmasq-dns-5c5cc7c5ff-5j5hq\" (UID: \"af43164c-211d-477d-9e70-21f118ba045d\") " pod="openstack/dnsmasq-dns-5c5cc7c5ff-5j5hq" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.033773 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af43164c-211d-477d-9e70-21f118ba045d-config\") pod \"dnsmasq-dns-5c5cc7c5ff-5j5hq\" (UID: \"af43164c-211d-477d-9e70-21f118ba045d\") " pod="openstack/dnsmasq-dns-5c5cc7c5ff-5j5hq" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.033909 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/af43164c-211d-477d-9e70-21f118ba045d-dns-svc\") pod \"dnsmasq-dns-5c5cc7c5ff-5j5hq\" (UID: \"af43164c-211d-477d-9e70-21f118ba045d\") " pod="openstack/dnsmasq-dns-5c5cc7c5ff-5j5hq" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.033946 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/af43164c-211d-477d-9e70-21f118ba045d-dns-swift-storage-0\") pod \"dnsmasq-dns-5c5cc7c5ff-5j5hq\" (UID: \"af43164c-211d-477d-9e70-21f118ba045d\") " pod="openstack/dnsmasq-dns-5c5cc7c5ff-5j5hq" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.033966 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s87ql\" (UniqueName: \"kubernetes.io/projected/af43164c-211d-477d-9e70-21f118ba045d-kube-api-access-s87ql\") pod \"dnsmasq-dns-5c5cc7c5ff-5j5hq\" (UID: \"af43164c-211d-477d-9e70-21f118ba045d\") " pod="openstack/dnsmasq-dns-5c5cc7c5ff-5j5hq" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.062442 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-gftq4"] Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.064051 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-gftq4" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.071267 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.071678 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.072320 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.072459 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.073190 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-h4zml" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.079966 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-gftq4"] Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.136683 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/13322d8f-5df8-488f-bead-b6b580b0b424-fernet-keys\") pod \"keystone-bootstrap-gftq4\" (UID: \"13322d8f-5df8-488f-bead-b6b580b0b424\") " pod="openstack/keystone-bootstrap-gftq4" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.136749 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13322d8f-5df8-488f-bead-b6b580b0b424-combined-ca-bundle\") pod \"keystone-bootstrap-gftq4\" (UID: \"13322d8f-5df8-488f-bead-b6b580b0b424\") " pod="openstack/keystone-bootstrap-gftq4" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.136792 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/af43164c-211d-477d-9e70-21f118ba045d-dns-svc\") pod \"dnsmasq-dns-5c5cc7c5ff-5j5hq\" (UID: \"af43164c-211d-477d-9e70-21f118ba045d\") " pod="openstack/dnsmasq-dns-5c5cc7c5ff-5j5hq" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.136827 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/af43164c-211d-477d-9e70-21f118ba045d-dns-swift-storage-0\") pod \"dnsmasq-dns-5c5cc7c5ff-5j5hq\" (UID: \"af43164c-211d-477d-9e70-21f118ba045d\") " pod="openstack/dnsmasq-dns-5c5cc7c5ff-5j5hq" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.136849 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s87ql\" (UniqueName: \"kubernetes.io/projected/af43164c-211d-477d-9e70-21f118ba045d-kube-api-access-s87ql\") pod \"dnsmasq-dns-5c5cc7c5ff-5j5hq\" (UID: \"af43164c-211d-477d-9e70-21f118ba045d\") " pod="openstack/dnsmasq-dns-5c5cc7c5ff-5j5hq" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.136867 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/13322d8f-5df8-488f-bead-b6b580b0b424-config-data\") pod \"keystone-bootstrap-gftq4\" (UID: \"13322d8f-5df8-488f-bead-b6b580b0b424\") " pod="openstack/keystone-bootstrap-gftq4" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.136889 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"kube-api-access-cmlqs\" (UniqueName: \"kubernetes.io/projected/13322d8f-5df8-488f-bead-b6b580b0b424-kube-api-access-cmlqs\") pod \"keystone-bootstrap-gftq4\" (UID: \"13322d8f-5df8-488f-bead-b6b580b0b424\") " pod="openstack/keystone-bootstrap-gftq4" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.136930 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/af43164c-211d-477d-9e70-21f118ba045d-ovsdbserver-nb\") pod \"dnsmasq-dns-5c5cc7c5ff-5j5hq\" (UID: \"af43164c-211d-477d-9e70-21f118ba045d\") " pod="openstack/dnsmasq-dns-5c5cc7c5ff-5j5hq" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.136973 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/13322d8f-5df8-488f-bead-b6b580b0b424-scripts\") pod \"keystone-bootstrap-gftq4\" (UID: \"13322d8f-5df8-488f-bead-b6b580b0b424\") " pod="openstack/keystone-bootstrap-gftq4" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.137001 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/af43164c-211d-477d-9e70-21f118ba045d-ovsdbserver-sb\") pod \"dnsmasq-dns-5c5cc7c5ff-5j5hq\" (UID: \"af43164c-211d-477d-9e70-21f118ba045d\") " pod="openstack/dnsmasq-dns-5c5cc7c5ff-5j5hq" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.137057 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af43164c-211d-477d-9e70-21f118ba045d-config\") pod \"dnsmasq-dns-5c5cc7c5ff-5j5hq\" (UID: \"af43164c-211d-477d-9e70-21f118ba045d\") " pod="openstack/dnsmasq-dns-5c5cc7c5ff-5j5hq" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.137088 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/13322d8f-5df8-488f-bead-b6b580b0b424-credential-keys\") pod \"keystone-bootstrap-gftq4\" (UID: \"13322d8f-5df8-488f-bead-b6b580b0b424\") " pod="openstack/keystone-bootstrap-gftq4" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.138303 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/af43164c-211d-477d-9e70-21f118ba045d-dns-svc\") pod \"dnsmasq-dns-5c5cc7c5ff-5j5hq\" (UID: \"af43164c-211d-477d-9e70-21f118ba045d\") " pod="openstack/dnsmasq-dns-5c5cc7c5ff-5j5hq" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.138898 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/af43164c-211d-477d-9e70-21f118ba045d-dns-swift-storage-0\") pod \"dnsmasq-dns-5c5cc7c5ff-5j5hq\" (UID: \"af43164c-211d-477d-9e70-21f118ba045d\") " pod="openstack/dnsmasq-dns-5c5cc7c5ff-5j5hq" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.139801 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/af43164c-211d-477d-9e70-21f118ba045d-ovsdbserver-nb\") pod \"dnsmasq-dns-5c5cc7c5ff-5j5hq\" (UID: \"af43164c-211d-477d-9e70-21f118ba045d\") " pod="openstack/dnsmasq-dns-5c5cc7c5ff-5j5hq" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.140943 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/af43164c-211d-477d-9e70-21f118ba045d-config\") pod \"dnsmasq-dns-5c5cc7c5ff-5j5hq\" (UID: \"af43164c-211d-477d-9e70-21f118ba045d\") " pod="openstack/dnsmasq-dns-5c5cc7c5ff-5j5hq" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.141061 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/af43164c-211d-477d-9e70-21f118ba045d-ovsdbserver-sb\") pod \"dnsmasq-dns-5c5cc7c5ff-5j5hq\" (UID: \"af43164c-211d-477d-9e70-21f118ba045d\") " pod="openstack/dnsmasq-dns-5c5cc7c5ff-5j5hq" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.178182 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-sync-ztzjs"] Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.179832 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-ztzjs" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.190261 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s87ql\" (UniqueName: \"kubernetes.io/projected/af43164c-211d-477d-9e70-21f118ba045d-kube-api-access-s87ql\") pod \"dnsmasq-dns-5c5cc7c5ff-5j5hq\" (UID: \"af43164c-211d-477d-9e70-21f118ba045d\") " pod="openstack/dnsmasq-dns-5c5cc7c5ff-5j5hq" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.193577 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.193962 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-f55x5" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.213251 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-ztzjs"] Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.238776 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/13322d8f-5df8-488f-bead-b6b580b0b424-fernet-keys\") pod \"keystone-bootstrap-gftq4\" (UID: \"13322d8f-5df8-488f-bead-b6b580b0b424\") " pod="openstack/keystone-bootstrap-gftq4" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.238825 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13322d8f-5df8-488f-bead-b6b580b0b424-combined-ca-bundle\") pod \"keystone-bootstrap-gftq4\" (UID: \"13322d8f-5df8-488f-bead-b6b580b0b424\") " pod="openstack/keystone-bootstrap-gftq4" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.238870 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/13322d8f-5df8-488f-bead-b6b580b0b424-config-data\") pod \"keystone-bootstrap-gftq4\" (UID: \"13322d8f-5df8-488f-bead-b6b580b0b424\") " pod="openstack/keystone-bootstrap-gftq4" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.238891 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cmlqs\" (UniqueName: \"kubernetes.io/projected/13322d8f-5df8-488f-bead-b6b580b0b424-kube-api-access-cmlqs\") pod \"keystone-bootstrap-gftq4\" (UID: \"13322d8f-5df8-488f-bead-b6b580b0b424\") " pod="openstack/keystone-bootstrap-gftq4" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.238944 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/13322d8f-5df8-488f-bead-b6b580b0b424-scripts\") pod \"keystone-bootstrap-gftq4\" (UID: \"13322d8f-5df8-488f-bead-b6b580b0b424\") " pod="openstack/keystone-bootstrap-gftq4" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.239015 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/13322d8f-5df8-488f-bead-b6b580b0b424-credential-keys\") pod \"keystone-bootstrap-gftq4\" (UID: \"13322d8f-5df8-488f-bead-b6b580b0b424\") " pod="openstack/keystone-bootstrap-gftq4" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.239038 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/667080fb-b428-4b48-87c9-a955ff09771a-combined-ca-bundle\") pod \"heat-db-sync-ztzjs\" (UID: \"667080fb-b428-4b48-87c9-a955ff09771a\") " pod="openstack/heat-db-sync-ztzjs" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.239057 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/667080fb-b428-4b48-87c9-a955ff09771a-config-data\") pod \"heat-db-sync-ztzjs\" (UID: \"667080fb-b428-4b48-87c9-a955ff09771a\") " pod="openstack/heat-db-sync-ztzjs" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.239101 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rqq8v\" (UniqueName: \"kubernetes.io/projected/667080fb-b428-4b48-87c9-a955ff09771a-kube-api-access-rqq8v\") pod \"heat-db-sync-ztzjs\" (UID: \"667080fb-b428-4b48-87c9-a955ff09771a\") " pod="openstack/heat-db-sync-ztzjs" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.250976 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/13322d8f-5df8-488f-bead-b6b580b0b424-scripts\") pod \"keystone-bootstrap-gftq4\" (UID: \"13322d8f-5df8-488f-bead-b6b580b0b424\") " pod="openstack/keystone-bootstrap-gftq4" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.251326 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/13322d8f-5df8-488f-bead-b6b580b0b424-fernet-keys\") pod \"keystone-bootstrap-gftq4\" (UID: \"13322d8f-5df8-488f-bead-b6b580b0b424\") " pod="openstack/keystone-bootstrap-gftq4" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.268388 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/13322d8f-5df8-488f-bead-b6b580b0b424-credential-keys\") pod \"keystone-bootstrap-gftq4\" (UID: \"13322d8f-5df8-488f-bead-b6b580b0b424\") " pod="openstack/keystone-bootstrap-gftq4" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.268953 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/13322d8f-5df8-488f-bead-b6b580b0b424-config-data\") pod \"keystone-bootstrap-gftq4\" (UID: \"13322d8f-5df8-488f-bead-b6b580b0b424\") " pod="openstack/keystone-bootstrap-gftq4" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.270048 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13322d8f-5df8-488f-bead-b6b580b0b424-combined-ca-bundle\") pod \"keystone-bootstrap-gftq4\" (UID: \"13322d8f-5df8-488f-bead-b6b580b0b424\") " 
pod="openstack/keystone-bootstrap-gftq4" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.299970 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cmlqs\" (UniqueName: \"kubernetes.io/projected/13322d8f-5df8-488f-bead-b6b580b0b424-kube-api-access-cmlqs\") pod \"keystone-bootstrap-gftq4\" (UID: \"13322d8f-5df8-488f-bead-b6b580b0b424\") " pod="openstack/keystone-bootstrap-gftq4" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.352782 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/667080fb-b428-4b48-87c9-a955ff09771a-combined-ca-bundle\") pod \"heat-db-sync-ztzjs\" (UID: \"667080fb-b428-4b48-87c9-a955ff09771a\") " pod="openstack/heat-db-sync-ztzjs" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.353040 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/667080fb-b428-4b48-87c9-a955ff09771a-config-data\") pod \"heat-db-sync-ztzjs\" (UID: \"667080fb-b428-4b48-87c9-a955ff09771a\") " pod="openstack/heat-db-sync-ztzjs" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.353082 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rqq8v\" (UniqueName: \"kubernetes.io/projected/667080fb-b428-4b48-87c9-a955ff09771a-kube-api-access-rqq8v\") pod \"heat-db-sync-ztzjs\" (UID: \"667080fb-b428-4b48-87c9-a955ff09771a\") " pod="openstack/heat-db-sync-ztzjs" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.357589 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-vh5z2"] Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.357879 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c5cc7c5ff-5j5hq" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.359283 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-vh5z2" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.365891 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.366511 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.370433 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-x8kqq" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.375616 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/667080fb-b428-4b48-87c9-a955ff09771a-config-data\") pod \"heat-db-sync-ztzjs\" (UID: \"667080fb-b428-4b48-87c9-a955ff09771a\") " pod="openstack/heat-db-sync-ztzjs" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.387809 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/667080fb-b428-4b48-87c9-a955ff09771a-combined-ca-bundle\") pod \"heat-db-sync-ztzjs\" (UID: \"667080fb-b428-4b48-87c9-a955ff09771a\") " pod="openstack/heat-db-sync-ztzjs" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.397543 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-gftq4" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.403427 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-vh5z2"] Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.404211 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rqq8v\" (UniqueName: \"kubernetes.io/projected/667080fb-b428-4b48-87c9-a955ff09771a-kube-api-access-rqq8v\") pod \"heat-db-sync-ztzjs\" (UID: \"667080fb-b428-4b48-87c9-a955ff09771a\") " pod="openstack/heat-db-sync-ztzjs" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.515453 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-wbd7n"] Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.527611 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-wbd7n" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.531697 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.531957 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-2kzh5" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.557153 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-t8dz8"] Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.566982 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-t8dz8" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.583393 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.583628 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.583822 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-r87xd" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.584413 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-sync-ztzjs" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.585420 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f-combined-ca-bundle\") pod \"cinder-db-sync-vh5z2\" (UID: \"4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f\") " pod="openstack/cinder-db-sync-vh5z2" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.585564 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f-db-sync-config-data\") pod \"cinder-db-sync-vh5z2\" (UID: \"4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f\") " pod="openstack/cinder-db-sync-vh5z2" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.585748 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f-scripts\") pod \"cinder-db-sync-vh5z2\" (UID: \"4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f\") " pod="openstack/cinder-db-sync-vh5z2" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.585869 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-wbd7n"] Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.586074 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vsjfm\" (UniqueName: \"kubernetes.io/projected/4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f-kube-api-access-vsjfm\") pod \"cinder-db-sync-vh5z2\" (UID: \"4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f\") " pod="openstack/cinder-db-sync-vh5z2" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.586240 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f-etc-machine-id\") pod \"cinder-db-sync-vh5z2\" (UID: \"4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f\") " pod="openstack/cinder-db-sync-vh5z2" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.586408 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f-config-data\") pod \"cinder-db-sync-vh5z2\" (UID: \"4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f\") " pod="openstack/cinder-db-sync-vh5z2" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.605410 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-t8dz8"] Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.617054 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c5cc7c5ff-5j5hq"] Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.628990 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-n7hw7"] Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.630753 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-n7hw7" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.643781 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.644211 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-twm6t" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.644462 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.652722 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8b5c85b87-vhjrk"] Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.676352 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8b5c85b87-vhjrk" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.712326 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zmjgv\" (UniqueName: \"kubernetes.io/projected/527d49b7-fb82-4e46-b608-839a2fce0f60-kube-api-access-zmjgv\") pod \"neutron-db-sync-t8dz8\" (UID: \"527d49b7-fb82-4e46-b608-839a2fce0f60\") " pod="openstack/neutron-db-sync-t8dz8" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.747431 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f-db-sync-config-data\") pod \"cinder-db-sync-vh5z2\" (UID: \"4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f\") " pod="openstack/cinder-db-sync-vh5z2" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.747874 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f40c7714-8669-4c33-8b1d-e3be853ca911-combined-ca-bundle\") pod \"barbican-db-sync-wbd7n\" (UID: \"f40c7714-8669-4c33-8b1d-e3be853ca911\") " pod="openstack/barbican-db-sync-wbd7n" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.747987 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/f40c7714-8669-4c33-8b1d-e3be853ca911-db-sync-config-data\") pod \"barbican-db-sync-wbd7n\" (UID: \"f40c7714-8669-4c33-8b1d-e3be853ca911\") " pod="openstack/barbican-db-sync-wbd7n" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.748206 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f-scripts\") pod \"cinder-db-sync-vh5z2\" (UID: \"4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f\") " pod="openstack/cinder-db-sync-vh5z2" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.748407 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x4b99\" (UniqueName: \"kubernetes.io/projected/f40c7714-8669-4c33-8b1d-e3be853ca911-kube-api-access-x4b99\") pod \"barbican-db-sync-wbd7n\" (UID: \"f40c7714-8669-4c33-8b1d-e3be853ca911\") " pod="openstack/barbican-db-sync-wbd7n" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.748452 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vsjfm\" (UniqueName: \"kubernetes.io/projected/4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f-kube-api-access-vsjfm\") pod 
\"cinder-db-sync-vh5z2\" (UID: \"4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f\") " pod="openstack/cinder-db-sync-vh5z2" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.748509 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/527d49b7-fb82-4e46-b608-839a2fce0f60-combined-ca-bundle\") pod \"neutron-db-sync-t8dz8\" (UID: \"527d49b7-fb82-4e46-b608-839a2fce0f60\") " pod="openstack/neutron-db-sync-t8dz8" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.748653 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f-etc-machine-id\") pod \"cinder-db-sync-vh5z2\" (UID: \"4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f\") " pod="openstack/cinder-db-sync-vh5z2" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.748779 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/527d49b7-fb82-4e46-b608-839a2fce0f60-config\") pod \"neutron-db-sync-t8dz8\" (UID: \"527d49b7-fb82-4e46-b608-839a2fce0f60\") " pod="openstack/neutron-db-sync-t8dz8" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.748880 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f-config-data\") pod \"cinder-db-sync-vh5z2\" (UID: \"4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f\") " pod="openstack/cinder-db-sync-vh5z2" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.749086 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f-combined-ca-bundle\") pod \"cinder-db-sync-vh5z2\" (UID: \"4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f\") " pod="openstack/cinder-db-sync-vh5z2" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.767362 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f-etc-machine-id\") pod \"cinder-db-sync-vh5z2\" (UID: \"4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f\") " pod="openstack/cinder-db-sync-vh5z2" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.801730 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f-db-sync-config-data\") pod \"cinder-db-sync-vh5z2\" (UID: \"4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f\") " pod="openstack/cinder-db-sync-vh5z2" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.809738 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f-combined-ca-bundle\") pod \"cinder-db-sync-vh5z2\" (UID: \"4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f\") " pod="openstack/cinder-db-sync-vh5z2" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.836966 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f-scripts\") pod \"cinder-db-sync-vh5z2\" (UID: \"4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f\") " pod="openstack/cinder-db-sync-vh5z2" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.869605 4949 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f-config-data\") pod \"cinder-db-sync-vh5z2\" (UID: \"4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f\") " pod="openstack/cinder-db-sync-vh5z2" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.881732 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vsjfm\" (UniqueName: \"kubernetes.io/projected/4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f-kube-api-access-vsjfm\") pod \"cinder-db-sync-vh5z2\" (UID: \"4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f\") " pod="openstack/cinder-db-sync-vh5z2" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.930489 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zmjgv\" (UniqueName: \"kubernetes.io/projected/527d49b7-fb82-4e46-b608-839a2fce0f60-kube-api-access-zmjgv\") pod \"neutron-db-sync-t8dz8\" (UID: \"527d49b7-fb82-4e46-b608-839a2fce0f60\") " pod="openstack/neutron-db-sync-t8dz8" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.930560 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/18a2cf62-0669-4033-ba7f-c69805fa9c3a-scripts\") pod \"placement-db-sync-n7hw7\" (UID: \"18a2cf62-0669-4033-ba7f-c69805fa9c3a\") " pod="openstack/placement-db-sync-n7hw7" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.930611 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rvh7x\" (UniqueName: \"kubernetes.io/projected/18a2cf62-0669-4033-ba7f-c69805fa9c3a-kube-api-access-rvh7x\") pod \"placement-db-sync-n7hw7\" (UID: \"18a2cf62-0669-4033-ba7f-c69805fa9c3a\") " pod="openstack/placement-db-sync-n7hw7" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.930646 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f40c7714-8669-4c33-8b1d-e3be853ca911-combined-ca-bundle\") pod \"barbican-db-sync-wbd7n\" (UID: \"f40c7714-8669-4c33-8b1d-e3be853ca911\") " pod="openstack/barbican-db-sync-wbd7n" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.930664 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7eac8d6e-e079-41ea-83cb-d4e3a553db13-config\") pod \"dnsmasq-dns-8b5c85b87-vhjrk\" (UID: \"7eac8d6e-e079-41ea-83cb-d4e3a553db13\") " pod="openstack/dnsmasq-dns-8b5c85b87-vhjrk" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.930685 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7eac8d6e-e079-41ea-83cb-d4e3a553db13-ovsdbserver-nb\") pod \"dnsmasq-dns-8b5c85b87-vhjrk\" (UID: \"7eac8d6e-e079-41ea-83cb-d4e3a553db13\") " pod="openstack/dnsmasq-dns-8b5c85b87-vhjrk" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.933046 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18a2cf62-0669-4033-ba7f-c69805fa9c3a-combined-ca-bundle\") pod \"placement-db-sync-n7hw7\" (UID: \"18a2cf62-0669-4033-ba7f-c69805fa9c3a\") " pod="openstack/placement-db-sync-n7hw7" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.933096 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/f40c7714-8669-4c33-8b1d-e3be853ca911-db-sync-config-data\") pod \"barbican-db-sync-wbd7n\" (UID: \"f40c7714-8669-4c33-8b1d-e3be853ca911\") " pod="openstack/barbican-db-sync-wbd7n" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.933137 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7eac8d6e-e079-41ea-83cb-d4e3a553db13-ovsdbserver-sb\") pod \"dnsmasq-dns-8b5c85b87-vhjrk\" (UID: \"7eac8d6e-e079-41ea-83cb-d4e3a553db13\") " pod="openstack/dnsmasq-dns-8b5c85b87-vhjrk" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.933454 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7eac8d6e-e079-41ea-83cb-d4e3a553db13-dns-swift-storage-0\") pod \"dnsmasq-dns-8b5c85b87-vhjrk\" (UID: \"7eac8d6e-e079-41ea-83cb-d4e3a553db13\") " pod="openstack/dnsmasq-dns-8b5c85b87-vhjrk" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.933550 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18a2cf62-0669-4033-ba7f-c69805fa9c3a-config-data\") pod \"placement-db-sync-n7hw7\" (UID: \"18a2cf62-0669-4033-ba7f-c69805fa9c3a\") " pod="openstack/placement-db-sync-n7hw7" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.933899 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x4b99\" (UniqueName: \"kubernetes.io/projected/f40c7714-8669-4c33-8b1d-e3be853ca911-kube-api-access-x4b99\") pod \"barbican-db-sync-wbd7n\" (UID: \"f40c7714-8669-4c33-8b1d-e3be853ca911\") " pod="openstack/barbican-db-sync-wbd7n" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.935600 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/527d49b7-fb82-4e46-b608-839a2fce0f60-combined-ca-bundle\") pod \"neutron-db-sync-t8dz8\" (UID: \"527d49b7-fb82-4e46-b608-839a2fce0f60\") " pod="openstack/neutron-db-sync-t8dz8" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.937600 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/527d49b7-fb82-4e46-b608-839a2fce0f60-config\") pod \"neutron-db-sync-t8dz8\" (UID: \"527d49b7-fb82-4e46-b608-839a2fce0f60\") " pod="openstack/neutron-db-sync-t8dz8" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.937645 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/18a2cf62-0669-4033-ba7f-c69805fa9c3a-logs\") pod \"placement-db-sync-n7hw7\" (UID: \"18a2cf62-0669-4033-ba7f-c69805fa9c3a\") " pod="openstack/placement-db-sync-n7hw7" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.937824 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7eac8d6e-e079-41ea-83cb-d4e3a553db13-dns-svc\") pod \"dnsmasq-dns-8b5c85b87-vhjrk\" (UID: \"7eac8d6e-e079-41ea-83cb-d4e3a553db13\") " pod="openstack/dnsmasq-dns-8b5c85b87-vhjrk" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.937858 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ncwxw\" (UniqueName: 
\"kubernetes.io/projected/7eac8d6e-e079-41ea-83cb-d4e3a553db13-kube-api-access-ncwxw\") pod \"dnsmasq-dns-8b5c85b87-vhjrk\" (UID: \"7eac8d6e-e079-41ea-83cb-d4e3a553db13\") " pod="openstack/dnsmasq-dns-8b5c85b87-vhjrk" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.954854 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-vh5z2" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.996760 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/f40c7714-8669-4c33-8b1d-e3be853ca911-db-sync-config-data\") pod \"barbican-db-sync-wbd7n\" (UID: \"f40c7714-8669-4c33-8b1d-e3be853ca911\") " pod="openstack/barbican-db-sync-wbd7n" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.997940 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/527d49b7-fb82-4e46-b608-839a2fce0f60-config\") pod \"neutron-db-sync-t8dz8\" (UID: \"527d49b7-fb82-4e46-b608-839a2fce0f60\") " pod="openstack/neutron-db-sync-t8dz8" Feb 16 11:30:17 crc kubenswrapper[4949]: I0216 11:30:17.998096 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/527d49b7-fb82-4e46-b608-839a2fce0f60-combined-ca-bundle\") pod \"neutron-db-sync-t8dz8\" (UID: \"527d49b7-fb82-4e46-b608-839a2fce0f60\") " pod="openstack/neutron-db-sync-t8dz8" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.016466 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f40c7714-8669-4c33-8b1d-e3be853ca911-combined-ca-bundle\") pod \"barbican-db-sync-wbd7n\" (UID: \"f40c7714-8669-4c33-8b1d-e3be853ca911\") " pod="openstack/barbican-db-sync-wbd7n" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.016937 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zmjgv\" (UniqueName: \"kubernetes.io/projected/527d49b7-fb82-4e46-b608-839a2fce0f60-kube-api-access-zmjgv\") pod \"neutron-db-sync-t8dz8\" (UID: \"527d49b7-fb82-4e46-b608-839a2fce0f60\") " pod="openstack/neutron-db-sync-t8dz8" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.017151 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x4b99\" (UniqueName: \"kubernetes.io/projected/f40c7714-8669-4c33-8b1d-e3be853ca911-kube-api-access-x4b99\") pod \"barbican-db-sync-wbd7n\" (UID: \"f40c7714-8669-4c33-8b1d-e3be853ca911\") " pod="openstack/barbican-db-sync-wbd7n" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.019449 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-t8dz8" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.020256 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-wbd7n" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.051514 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/18a2cf62-0669-4033-ba7f-c69805fa9c3a-scripts\") pod \"placement-db-sync-n7hw7\" (UID: \"18a2cf62-0669-4033-ba7f-c69805fa9c3a\") " pod="openstack/placement-db-sync-n7hw7" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.051578 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rvh7x\" (UniqueName: \"kubernetes.io/projected/18a2cf62-0669-4033-ba7f-c69805fa9c3a-kube-api-access-rvh7x\") pod \"placement-db-sync-n7hw7\" (UID: \"18a2cf62-0669-4033-ba7f-c69805fa9c3a\") " pod="openstack/placement-db-sync-n7hw7" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.051608 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7eac8d6e-e079-41ea-83cb-d4e3a553db13-config\") pod \"dnsmasq-dns-8b5c85b87-vhjrk\" (UID: \"7eac8d6e-e079-41ea-83cb-d4e3a553db13\") " pod="openstack/dnsmasq-dns-8b5c85b87-vhjrk" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.051627 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7eac8d6e-e079-41ea-83cb-d4e3a553db13-ovsdbserver-nb\") pod \"dnsmasq-dns-8b5c85b87-vhjrk\" (UID: \"7eac8d6e-e079-41ea-83cb-d4e3a553db13\") " pod="openstack/dnsmasq-dns-8b5c85b87-vhjrk" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.051653 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18a2cf62-0669-4033-ba7f-c69805fa9c3a-combined-ca-bundle\") pod \"placement-db-sync-n7hw7\" (UID: \"18a2cf62-0669-4033-ba7f-c69805fa9c3a\") " pod="openstack/placement-db-sync-n7hw7" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.051679 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7eac8d6e-e079-41ea-83cb-d4e3a553db13-ovsdbserver-sb\") pod \"dnsmasq-dns-8b5c85b87-vhjrk\" (UID: \"7eac8d6e-e079-41ea-83cb-d4e3a553db13\") " pod="openstack/dnsmasq-dns-8b5c85b87-vhjrk" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.051715 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7eac8d6e-e079-41ea-83cb-d4e3a553db13-dns-swift-storage-0\") pod \"dnsmasq-dns-8b5c85b87-vhjrk\" (UID: \"7eac8d6e-e079-41ea-83cb-d4e3a553db13\") " pod="openstack/dnsmasq-dns-8b5c85b87-vhjrk" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.051749 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18a2cf62-0669-4033-ba7f-c69805fa9c3a-config-data\") pod \"placement-db-sync-n7hw7\" (UID: \"18a2cf62-0669-4033-ba7f-c69805fa9c3a\") " pod="openstack/placement-db-sync-n7hw7" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.051821 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/18a2cf62-0669-4033-ba7f-c69805fa9c3a-logs\") pod \"placement-db-sync-n7hw7\" (UID: \"18a2cf62-0669-4033-ba7f-c69805fa9c3a\") " pod="openstack/placement-db-sync-n7hw7" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.051876 4949 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7eac8d6e-e079-41ea-83cb-d4e3a553db13-dns-svc\") pod \"dnsmasq-dns-8b5c85b87-vhjrk\" (UID: \"7eac8d6e-e079-41ea-83cb-d4e3a553db13\") " pod="openstack/dnsmasq-dns-8b5c85b87-vhjrk" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.051903 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ncwxw\" (UniqueName: \"kubernetes.io/projected/7eac8d6e-e079-41ea-83cb-d4e3a553db13-kube-api-access-ncwxw\") pod \"dnsmasq-dns-8b5c85b87-vhjrk\" (UID: \"7eac8d6e-e079-41ea-83cb-d4e3a553db13\") " pod="openstack/dnsmasq-dns-8b5c85b87-vhjrk" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.057151 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-n7hw7"] Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.058197 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7eac8d6e-e079-41ea-83cb-d4e3a553db13-ovsdbserver-sb\") pod \"dnsmasq-dns-8b5c85b87-vhjrk\" (UID: \"7eac8d6e-e079-41ea-83cb-d4e3a553db13\") " pod="openstack/dnsmasq-dns-8b5c85b87-vhjrk" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.059206 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7eac8d6e-e079-41ea-83cb-d4e3a553db13-config\") pod \"dnsmasq-dns-8b5c85b87-vhjrk\" (UID: \"7eac8d6e-e079-41ea-83cb-d4e3a553db13\") " pod="openstack/dnsmasq-dns-8b5c85b87-vhjrk" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.059827 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7eac8d6e-e079-41ea-83cb-d4e3a553db13-ovsdbserver-nb\") pod \"dnsmasq-dns-8b5c85b87-vhjrk\" (UID: \"7eac8d6e-e079-41ea-83cb-d4e3a553db13\") " pod="openstack/dnsmasq-dns-8b5c85b87-vhjrk" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.061640 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/18a2cf62-0669-4033-ba7f-c69805fa9c3a-scripts\") pod \"placement-db-sync-n7hw7\" (UID: \"18a2cf62-0669-4033-ba7f-c69805fa9c3a\") " pod="openstack/placement-db-sync-n7hw7" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.062371 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7eac8d6e-e079-41ea-83cb-d4e3a553db13-dns-swift-storage-0\") pod \"dnsmasq-dns-8b5c85b87-vhjrk\" (UID: \"7eac8d6e-e079-41ea-83cb-d4e3a553db13\") " pod="openstack/dnsmasq-dns-8b5c85b87-vhjrk" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.064812 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/18a2cf62-0669-4033-ba7f-c69805fa9c3a-logs\") pod \"placement-db-sync-n7hw7\" (UID: \"18a2cf62-0669-4033-ba7f-c69805fa9c3a\") " pod="openstack/placement-db-sync-n7hw7" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.065594 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7eac8d6e-e079-41ea-83cb-d4e3a553db13-dns-svc\") pod \"dnsmasq-dns-8b5c85b87-vhjrk\" (UID: \"7eac8d6e-e079-41ea-83cb-d4e3a553db13\") " pod="openstack/dnsmasq-dns-8b5c85b87-vhjrk" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.103480 4949 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-rvh7x\" (UniqueName: \"kubernetes.io/projected/18a2cf62-0669-4033-ba7f-c69805fa9c3a-kube-api-access-rvh7x\") pod \"placement-db-sync-n7hw7\" (UID: \"18a2cf62-0669-4033-ba7f-c69805fa9c3a\") " pod="openstack/placement-db-sync-n7hw7" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.111160 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18a2cf62-0669-4033-ba7f-c69805fa9c3a-config-data\") pod \"placement-db-sync-n7hw7\" (UID: \"18a2cf62-0669-4033-ba7f-c69805fa9c3a\") " pod="openstack/placement-db-sync-n7hw7" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.115321 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ncwxw\" (UniqueName: \"kubernetes.io/projected/7eac8d6e-e079-41ea-83cb-d4e3a553db13-kube-api-access-ncwxw\") pod \"dnsmasq-dns-8b5c85b87-vhjrk\" (UID: \"7eac8d6e-e079-41ea-83cb-d4e3a553db13\") " pod="openstack/dnsmasq-dns-8b5c85b87-vhjrk" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.115432 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18a2cf62-0669-4033-ba7f-c69805fa9c3a-combined-ca-bundle\") pod \"placement-db-sync-n7hw7\" (UID: \"18a2cf62-0669-4033-ba7f-c69805fa9c3a\") " pod="openstack/placement-db-sync-n7hw7" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.145865 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8b5c85b87-vhjrk" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.154158 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8b5c85b87-vhjrk"] Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.257746 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.260301 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.264850 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.265391 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.265535 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-76nfs" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.265700 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.277606 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.339292 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-n7hw7" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.381374 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.389150 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.391948 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.393817 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.424670 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.477746 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cbf839a7-7c2a-4595-b736-601d5559d548-logs\") pod \"glance-default-external-api-0\" (UID: \"cbf839a7-7c2a-4595-b736-601d5559d548\") " pod="openstack/glance-default-external-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.477822 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-c8fa8abd-35cd-473b-b870-e4bd2cd177cf\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c8fa8abd-35cd-473b-b870-e4bd2cd177cf\") pod \"glance-default-external-api-0\" (UID: \"cbf839a7-7c2a-4595-b736-601d5559d548\") " pod="openstack/glance-default-external-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.477846 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cbf839a7-7c2a-4595-b736-601d5559d548-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"cbf839a7-7c2a-4595-b736-601d5559d548\") " pod="openstack/glance-default-external-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.477922 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qln5r\" (UniqueName: \"kubernetes.io/projected/cbf839a7-7c2a-4595-b736-601d5559d548-kube-api-access-qln5r\") pod \"glance-default-external-api-0\" (UID: \"cbf839a7-7c2a-4595-b736-601d5559d548\") " pod="openstack/glance-default-external-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.477979 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cbf839a7-7c2a-4595-b736-601d5559d548-config-data\") pod \"glance-default-external-api-0\" (UID: \"cbf839a7-7c2a-4595-b736-601d5559d548\") " pod="openstack/glance-default-external-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.478010 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cbf839a7-7c2a-4595-b736-601d5559d548-scripts\") pod \"glance-default-external-api-0\" (UID: \"cbf839a7-7c2a-4595-b736-601d5559d548\") " pod="openstack/glance-default-external-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.478024 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/cbf839a7-7c2a-4595-b736-601d5559d548-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"cbf839a7-7c2a-4595-b736-601d5559d548\") " pod="openstack/glance-default-external-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.479688 4949 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cbf839a7-7c2a-4595-b736-601d5559d548-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"cbf839a7-7c2a-4595-b736-601d5559d548\") " pod="openstack/glance-default-external-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.581475 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/33f5ed17-eb47-40e9-a499-b1604532f38e-scripts\") pod \"glance-default-internal-api-0\" (UID: \"33f5ed17-eb47-40e9-a499-b1604532f38e\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.581518 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33f5ed17-eb47-40e9-a499-b1604532f38e-config-data\") pod \"glance-default-internal-api-0\" (UID: \"33f5ed17-eb47-40e9-a499-b1604532f38e\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.581549 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-27105715-b37b-455e-aa04-f095a035218a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-27105715-b37b-455e-aa04-f095a035218a\") pod \"glance-default-internal-api-0\" (UID: \"33f5ed17-eb47-40e9-a499-b1604532f38e\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.581577 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cbf839a7-7c2a-4595-b736-601d5559d548-logs\") pod \"glance-default-external-api-0\" (UID: \"cbf839a7-7c2a-4595-b736-601d5559d548\") " pod="openstack/glance-default-external-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.581606 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33f5ed17-eb47-40e9-a499-b1604532f38e-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"33f5ed17-eb47-40e9-a499-b1604532f38e\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.581636 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-c8fa8abd-35cd-473b-b870-e4bd2cd177cf\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c8fa8abd-35cd-473b-b870-e4bd2cd177cf\") pod \"glance-default-external-api-0\" (UID: \"cbf839a7-7c2a-4595-b736-601d5559d548\") " pod="openstack/glance-default-external-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.581659 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cbf839a7-7c2a-4595-b736-601d5559d548-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"cbf839a7-7c2a-4595-b736-601d5559d548\") " pod="openstack/glance-default-external-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.581703 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/33f5ed17-eb47-40e9-a499-b1604532f38e-httpd-run\") pod \"glance-default-internal-api-0\" (UID: 
\"33f5ed17-eb47-40e9-a499-b1604532f38e\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.581846 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qln5r\" (UniqueName: \"kubernetes.io/projected/cbf839a7-7c2a-4595-b736-601d5559d548-kube-api-access-qln5r\") pod \"glance-default-external-api-0\" (UID: \"cbf839a7-7c2a-4595-b736-601d5559d548\") " pod="openstack/glance-default-external-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.581908 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cbf839a7-7c2a-4595-b736-601d5559d548-config-data\") pod \"glance-default-external-api-0\" (UID: \"cbf839a7-7c2a-4595-b736-601d5559d548\") " pod="openstack/glance-default-external-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.581935 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cbf839a7-7c2a-4595-b736-601d5559d548-scripts\") pod \"glance-default-external-api-0\" (UID: \"cbf839a7-7c2a-4595-b736-601d5559d548\") " pod="openstack/glance-default-external-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.581950 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/cbf839a7-7c2a-4595-b736-601d5559d548-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"cbf839a7-7c2a-4595-b736-601d5559d548\") " pod="openstack/glance-default-external-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.581986 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lfj5n\" (UniqueName: \"kubernetes.io/projected/33f5ed17-eb47-40e9-a499-b1604532f38e-kube-api-access-lfj5n\") pod \"glance-default-internal-api-0\" (UID: \"33f5ed17-eb47-40e9-a499-b1604532f38e\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.582032 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/33f5ed17-eb47-40e9-a499-b1604532f38e-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"33f5ed17-eb47-40e9-a499-b1604532f38e\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.582053 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/33f5ed17-eb47-40e9-a499-b1604532f38e-logs\") pod \"glance-default-internal-api-0\" (UID: \"33f5ed17-eb47-40e9-a499-b1604532f38e\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.582074 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cbf839a7-7c2a-4595-b736-601d5559d548-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"cbf839a7-7c2a-4595-b736-601d5559d548\") " pod="openstack/glance-default-external-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.583977 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cbf839a7-7c2a-4595-b736-601d5559d548-logs\") pod \"glance-default-external-api-0\" (UID: 
\"cbf839a7-7c2a-4595-b736-601d5559d548\") " pod="openstack/glance-default-external-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.590818 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cbf839a7-7c2a-4595-b736-601d5559d548-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"cbf839a7-7c2a-4595-b736-601d5559d548\") " pod="openstack/glance-default-external-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.592119 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cbf839a7-7c2a-4595-b736-601d5559d548-config-data\") pod \"glance-default-external-api-0\" (UID: \"cbf839a7-7c2a-4595-b736-601d5559d548\") " pod="openstack/glance-default-external-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.598725 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/cbf839a7-7c2a-4595-b736-601d5559d548-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"cbf839a7-7c2a-4595-b736-601d5559d548\") " pod="openstack/glance-default-external-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.599235 4949 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.599284 4949 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-c8fa8abd-35cd-473b-b870-e4bd2cd177cf\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c8fa8abd-35cd-473b-b870-e4bd2cd177cf\") pod \"glance-default-external-api-0\" (UID: \"cbf839a7-7c2a-4595-b736-601d5559d548\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/459835082c7f1564eb79959c4b82a2b1818ffad6c5c0d3df291e5e1cf38dd0a7/globalmount\"" pod="openstack/glance-default-external-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.600061 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cbf839a7-7c2a-4595-b736-601d5559d548-scripts\") pod \"glance-default-external-api-0\" (UID: \"cbf839a7-7c2a-4595-b736-601d5559d548\") " pod="openstack/glance-default-external-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.601123 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cbf839a7-7c2a-4595-b736-601d5559d548-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"cbf839a7-7c2a-4595-b736-601d5559d548\") " pod="openstack/glance-default-external-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.616248 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qln5r\" (UniqueName: \"kubernetes.io/projected/cbf839a7-7c2a-4595-b736-601d5559d548-kube-api-access-qln5r\") pod \"glance-default-external-api-0\" (UID: \"cbf839a7-7c2a-4595-b736-601d5559d548\") " pod="openstack/glance-default-external-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.685024 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lfj5n\" (UniqueName: \"kubernetes.io/projected/33f5ed17-eb47-40e9-a499-b1604532f38e-kube-api-access-lfj5n\") pod \"glance-default-internal-api-0\" (UID: \"33f5ed17-eb47-40e9-a499-b1604532f38e\") " 
pod="openstack/glance-default-internal-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.685112 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/33f5ed17-eb47-40e9-a499-b1604532f38e-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"33f5ed17-eb47-40e9-a499-b1604532f38e\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.685135 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/33f5ed17-eb47-40e9-a499-b1604532f38e-logs\") pod \"glance-default-internal-api-0\" (UID: \"33f5ed17-eb47-40e9-a499-b1604532f38e\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.685203 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/33f5ed17-eb47-40e9-a499-b1604532f38e-scripts\") pod \"glance-default-internal-api-0\" (UID: \"33f5ed17-eb47-40e9-a499-b1604532f38e\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.685222 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33f5ed17-eb47-40e9-a499-b1604532f38e-config-data\") pod \"glance-default-internal-api-0\" (UID: \"33f5ed17-eb47-40e9-a499-b1604532f38e\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.685252 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-27105715-b37b-455e-aa04-f095a035218a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-27105715-b37b-455e-aa04-f095a035218a\") pod \"glance-default-internal-api-0\" (UID: \"33f5ed17-eb47-40e9-a499-b1604532f38e\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.685290 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33f5ed17-eb47-40e9-a499-b1604532f38e-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"33f5ed17-eb47-40e9-a499-b1604532f38e\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.685366 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/33f5ed17-eb47-40e9-a499-b1604532f38e-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"33f5ed17-eb47-40e9-a499-b1604532f38e\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.687502 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/33f5ed17-eb47-40e9-a499-b1604532f38e-logs\") pod \"glance-default-internal-api-0\" (UID: \"33f5ed17-eb47-40e9-a499-b1604532f38e\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.688023 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/33f5ed17-eb47-40e9-a499-b1604532f38e-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"33f5ed17-eb47-40e9-a499-b1604532f38e\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:30:18 crc 
kubenswrapper[4949]: I0216 11:30:18.693388 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33f5ed17-eb47-40e9-a499-b1604532f38e-config-data\") pod \"glance-default-internal-api-0\" (UID: \"33f5ed17-eb47-40e9-a499-b1604532f38e\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.695161 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33f5ed17-eb47-40e9-a499-b1604532f38e-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"33f5ed17-eb47-40e9-a499-b1604532f38e\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.697840 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/33f5ed17-eb47-40e9-a499-b1604532f38e-scripts\") pod \"glance-default-internal-api-0\" (UID: \"33f5ed17-eb47-40e9-a499-b1604532f38e\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.700374 4949 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.700441 4949 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-27105715-b37b-455e-aa04-f095a035218a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-27105715-b37b-455e-aa04-f095a035218a\") pod \"glance-default-internal-api-0\" (UID: \"33f5ed17-eb47-40e9-a499-b1604532f38e\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/eecfdadb463f5cbe330cd09c7447d0c2f21fd30b4fa967afbb7cd97dad6544d3/globalmount\"" pod="openstack/glance-default-internal-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.707378 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/33f5ed17-eb47-40e9-a499-b1604532f38e-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"33f5ed17-eb47-40e9-a499-b1604532f38e\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.715421 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-c8fa8abd-35cd-473b-b870-e4bd2cd177cf\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c8fa8abd-35cd-473b-b870-e4bd2cd177cf\") pod \"glance-default-external-api-0\" (UID: \"cbf839a7-7c2a-4595-b736-601d5559d548\") " pod="openstack/glance-default-external-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.720125 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lfj5n\" (UniqueName: \"kubernetes.io/projected/33f5ed17-eb47-40e9-a499-b1604532f38e-kube-api-access-lfj5n\") pod \"glance-default-internal-api-0\" (UID: \"33f5ed17-eb47-40e9-a499-b1604532f38e\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.741207 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c5cc7c5ff-5j5hq"] Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.763830 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.767252 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.770938 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.771204 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.790779 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.821708 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-27105715-b37b-455e-aa04-f095a035218a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-27105715-b37b-455e-aa04-f095a035218a\") pod \"glance-default-internal-api-0\" (UID: \"33f5ed17-eb47-40e9-a499-b1604532f38e\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.863288 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.883924 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.887143 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c5cc7c5ff-5j5hq" event={"ID":"af43164c-211d-477d-9e70-21f118ba045d","Type":"ContainerStarted","Data":"26c1790647935fb1bd74ad5099027a1e9017f795304ed943cecdb354df6369de"} Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.891948 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2v464\" (UniqueName: \"kubernetes.io/projected/c59b957e-c5f8-463f-8228-1051225f5140-kube-api-access-2v464\") pod \"ceilometer-0\" (UID: \"c59b957e-c5f8-463f-8228-1051225f5140\") " pod="openstack/ceilometer-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.892024 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c59b957e-c5f8-463f-8228-1051225f5140-run-httpd\") pod \"ceilometer-0\" (UID: \"c59b957e-c5f8-463f-8228-1051225f5140\") " pod="openstack/ceilometer-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.892056 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c59b957e-c5f8-463f-8228-1051225f5140-scripts\") pod \"ceilometer-0\" (UID: \"c59b957e-c5f8-463f-8228-1051225f5140\") " pod="openstack/ceilometer-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.892086 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c59b957e-c5f8-463f-8228-1051225f5140-config-data\") pod \"ceilometer-0\" (UID: \"c59b957e-c5f8-463f-8228-1051225f5140\") " pod="openstack/ceilometer-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.892209 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c59b957e-c5f8-463f-8228-1051225f5140-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c59b957e-c5f8-463f-8228-1051225f5140\") " pod="openstack/ceilometer-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 
11:30:18.892301 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c59b957e-c5f8-463f-8228-1051225f5140-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c59b957e-c5f8-463f-8228-1051225f5140\") " pod="openstack/ceilometer-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.892409 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c59b957e-c5f8-463f-8228-1051225f5140-log-httpd\") pod \"ceilometer-0\" (UID: \"c59b957e-c5f8-463f-8228-1051225f5140\") " pod="openstack/ceilometer-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.995334 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c59b957e-c5f8-463f-8228-1051225f5140-log-httpd\") pod \"ceilometer-0\" (UID: \"c59b957e-c5f8-463f-8228-1051225f5140\") " pod="openstack/ceilometer-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.995448 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2v464\" (UniqueName: \"kubernetes.io/projected/c59b957e-c5f8-463f-8228-1051225f5140-kube-api-access-2v464\") pod \"ceilometer-0\" (UID: \"c59b957e-c5f8-463f-8228-1051225f5140\") " pod="openstack/ceilometer-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.995488 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c59b957e-c5f8-463f-8228-1051225f5140-run-httpd\") pod \"ceilometer-0\" (UID: \"c59b957e-c5f8-463f-8228-1051225f5140\") " pod="openstack/ceilometer-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.995514 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c59b957e-c5f8-463f-8228-1051225f5140-scripts\") pod \"ceilometer-0\" (UID: \"c59b957e-c5f8-463f-8228-1051225f5140\") " pod="openstack/ceilometer-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.995546 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c59b957e-c5f8-463f-8228-1051225f5140-config-data\") pod \"ceilometer-0\" (UID: \"c59b957e-c5f8-463f-8228-1051225f5140\") " pod="openstack/ceilometer-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.995621 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c59b957e-c5f8-463f-8228-1051225f5140-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c59b957e-c5f8-463f-8228-1051225f5140\") " pod="openstack/ceilometer-0" Feb 16 11:30:18 crc kubenswrapper[4949]: I0216 11:30:18.995673 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c59b957e-c5f8-463f-8228-1051225f5140-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c59b957e-c5f8-463f-8228-1051225f5140\") " pod="openstack/ceilometer-0" Feb 16 11:30:19 crc kubenswrapper[4949]: I0216 11:30:19.002751 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c59b957e-c5f8-463f-8228-1051225f5140-log-httpd\") pod \"ceilometer-0\" (UID: \"c59b957e-c5f8-463f-8228-1051225f5140\") " pod="openstack/ceilometer-0" Feb 16 11:30:19 crc kubenswrapper[4949]: 
I0216 11:30:19.005692 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c59b957e-c5f8-463f-8228-1051225f5140-run-httpd\") pod \"ceilometer-0\" (UID: \"c59b957e-c5f8-463f-8228-1051225f5140\") " pod="openstack/ceilometer-0" Feb 16 11:30:19 crc kubenswrapper[4949]: I0216 11:30:19.009209 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c59b957e-c5f8-463f-8228-1051225f5140-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c59b957e-c5f8-463f-8228-1051225f5140\") " pod="openstack/ceilometer-0" Feb 16 11:30:19 crc kubenswrapper[4949]: I0216 11:30:19.010079 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c59b957e-c5f8-463f-8228-1051225f5140-scripts\") pod \"ceilometer-0\" (UID: \"c59b957e-c5f8-463f-8228-1051225f5140\") " pod="openstack/ceilometer-0" Feb 16 11:30:19 crc kubenswrapper[4949]: I0216 11:30:19.011788 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c59b957e-c5f8-463f-8228-1051225f5140-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c59b957e-c5f8-463f-8228-1051225f5140\") " pod="openstack/ceilometer-0" Feb 16 11:30:19 crc kubenswrapper[4949]: I0216 11:30:19.017907 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c59b957e-c5f8-463f-8228-1051225f5140-config-data\") pod \"ceilometer-0\" (UID: \"c59b957e-c5f8-463f-8228-1051225f5140\") " pod="openstack/ceilometer-0" Feb 16 11:30:19 crc kubenswrapper[4949]: I0216 11:30:19.030355 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2v464\" (UniqueName: \"kubernetes.io/projected/c59b957e-c5f8-463f-8228-1051225f5140-kube-api-access-2v464\") pod \"ceilometer-0\" (UID: \"c59b957e-c5f8-463f-8228-1051225f5140\") " pod="openstack/ceilometer-0" Feb 16 11:30:19 crc kubenswrapper[4949]: I0216 11:30:19.050444 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-gftq4"] Feb 16 11:30:19 crc kubenswrapper[4949]: I0216 11:30:19.065349 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-ztzjs"] Feb 16 11:30:19 crc kubenswrapper[4949]: I0216 11:30:19.096006 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 16 11:30:19 crc kubenswrapper[4949]: I0216 11:30:19.231715 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-vh5z2"] Feb 16 11:30:19 crc kubenswrapper[4949]: I0216 11:30:19.699809 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-n7hw7"] Feb 16 11:30:19 crc kubenswrapper[4949]: I0216 11:30:19.730330 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-t8dz8"] Feb 16 11:30:19 crc kubenswrapper[4949]: I0216 11:30:19.744327 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8b5c85b87-vhjrk"] Feb 16 11:30:19 crc kubenswrapper[4949]: W0216 11:30:19.756975 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7eac8d6e_e079_41ea_83cb_d4e3a553db13.slice/crio-28f7cb3eac55faad405025d43b7dc7bb61b9c936903fd17b4ea1fd7490c44753 WatchSource:0}: Error finding container 28f7cb3eac55faad405025d43b7dc7bb61b9c936903fd17b4ea1fd7490c44753: Status 404 returned error can't find the container with id 28f7cb3eac55faad405025d43b7dc7bb61b9c936903fd17b4ea1fd7490c44753 Feb 16 11:30:19 crc kubenswrapper[4949]: I0216 11:30:19.758096 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-wbd7n"] Feb 16 11:30:19 crc kubenswrapper[4949]: I0216 11:30:19.924730 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-n7hw7" event={"ID":"18a2cf62-0669-4033-ba7f-c69805fa9c3a","Type":"ContainerStarted","Data":"49d854202314dbf79b0dc3bed0f8068dfd54e14921f114b604abf3de2de135d6"} Feb 16 11:30:19 crc kubenswrapper[4949]: I0216 11:30:19.934006 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-t8dz8" event={"ID":"527d49b7-fb82-4e46-b608-839a2fce0f60","Type":"ContainerStarted","Data":"f467af5effe1ad02b5b1ae6d7729b27bea25232505ab2beecac76bf4882aadd7"} Feb 16 11:30:19 crc kubenswrapper[4949]: I0216 11:30:19.936366 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-wbd7n" event={"ID":"f40c7714-8669-4c33-8b1d-e3be853ca911","Type":"ContainerStarted","Data":"6bd8d3a581d0985d084f930e63c9a3ae983cd463fb1e47fce172e71e56a228f4"} Feb 16 11:30:19 crc kubenswrapper[4949]: I0216 11:30:19.947100 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-ztzjs" event={"ID":"667080fb-b428-4b48-87c9-a955ff09771a","Type":"ContainerStarted","Data":"adf7eb23d6ff1b3006ba4d52003ea441cb4033bf313be33c9ac72cf9293583c9"} Feb 16 11:30:19 crc kubenswrapper[4949]: I0216 11:30:19.949384 4949 generic.go:334] "Generic (PLEG): container finished" podID="af43164c-211d-477d-9e70-21f118ba045d" containerID="95cf3d8040c5fed76fd64ee40a537e9f9c7e96c21be2a7645f2c5b999919d64e" exitCode=0 Feb 16 11:30:19 crc kubenswrapper[4949]: I0216 11:30:19.949457 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c5cc7c5ff-5j5hq" event={"ID":"af43164c-211d-477d-9e70-21f118ba045d","Type":"ContainerDied","Data":"95cf3d8040c5fed76fd64ee40a537e9f9c7e96c21be2a7645f2c5b999919d64e"} Feb 16 11:30:19 crc kubenswrapper[4949]: I0216 11:30:19.985373 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-vh5z2" event={"ID":"4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f","Type":"ContainerStarted","Data":"81057128bb902c4a82b2d4b8fd47d86f6c7f19db48cd55facbecaf8989423567"} Feb 16 11:30:20 crc kubenswrapper[4949]: I0216 
11:30:20.065980 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b5c85b87-vhjrk" event={"ID":"7eac8d6e-e079-41ea-83cb-d4e3a553db13","Type":"ContainerStarted","Data":"28f7cb3eac55faad405025d43b7dc7bb61b9c936903fd17b4ea1fd7490c44753"}
Feb 16 11:30:20 crc kubenswrapper[4949]: I0216 11:30:20.066044 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-gftq4" event={"ID":"13322d8f-5df8-488f-bead-b6b580b0b424","Type":"ContainerStarted","Data":"d67186fb11e74fd25242f5bd451feeedee251d7cce9bf89a9a7455d2d03bd3c5"}
Feb 16 11:30:20 crc kubenswrapper[4949]: I0216 11:30:20.066069 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-gftq4" event={"ID":"13322d8f-5df8-488f-bead-b6b580b0b424","Type":"ContainerStarted","Data":"b0289a49d80bb4c1110c91dac47bf6381e65404d7289d8a8ae0288327798abaf"}
Feb 16 11:30:20 crc kubenswrapper[4949]: I0216 11:30:20.066777 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Feb 16 11:30:20 crc kubenswrapper[4949]: I0216 11:30:20.102225 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-gftq4" podStartSLOduration=3.102196417 podStartE2EDuration="3.102196417s" podCreationTimestamp="2026-02-16 11:30:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:30:20.080305543 +0000 UTC m=+1409.709639708" watchObservedRunningTime="2026-02-16 11:30:20.102196417 +0000 UTC m=+1409.731530582"
Feb 16 11:30:20 crc kubenswrapper[4949]: I0216 11:30:20.140662 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Feb 16 11:30:20 crc kubenswrapper[4949]: I0216 11:30:20.459156 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Feb 16 11:30:20 crc kubenswrapper[4949]: I0216 11:30:20.588560 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Feb 16 11:30:20 crc kubenswrapper[4949]: I0216 11:30:20.656625 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Feb 16 11:30:20 crc kubenswrapper[4949]: W0216 11:30:20.689347 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcbf839a7_7c2a_4595_b736_601d5559d548.slice/crio-15269389c418c4695ad72d57557c7d27324adba53595bc0982f53ea02098e1f0 WatchSource:0}: Error finding container 15269389c418c4695ad72d57557c7d27324adba53595bc0982f53ea02098e1f0: Status 404 returned error can't find the container with id 15269389c418c4695ad72d57557c7d27324adba53595bc0982f53ea02098e1f0
Feb 16 11:30:20 crc kubenswrapper[4949]: I0216 11:30:20.715437 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Feb 16 11:30:20 crc kubenswrapper[4949]: I0216 11:30:20.820927 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c5cc7c5ff-5j5hq"
Feb 16 11:30:20 crc kubenswrapper[4949]: I0216 11:30:20.971143 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/af43164c-211d-477d-9e70-21f118ba045d-dns-swift-storage-0\") pod \"af43164c-211d-477d-9e70-21f118ba045d\" (UID: \"af43164c-211d-477d-9e70-21f118ba045d\") "
Feb 16 11:30:20 crc kubenswrapper[4949]: I0216 11:30:20.971270 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/af43164c-211d-477d-9e70-21f118ba045d-dns-svc\") pod \"af43164c-211d-477d-9e70-21f118ba045d\" (UID: \"af43164c-211d-477d-9e70-21f118ba045d\") "
Feb 16 11:30:20 crc kubenswrapper[4949]: I0216 11:30:20.971397 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/af43164c-211d-477d-9e70-21f118ba045d-ovsdbserver-nb\") pod \"af43164c-211d-477d-9e70-21f118ba045d\" (UID: \"af43164c-211d-477d-9e70-21f118ba045d\") "
Feb 16 11:30:20 crc kubenswrapper[4949]: I0216 11:30:20.971476 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/af43164c-211d-477d-9e70-21f118ba045d-ovsdbserver-sb\") pod \"af43164c-211d-477d-9e70-21f118ba045d\" (UID: \"af43164c-211d-477d-9e70-21f118ba045d\") "
Feb 16 11:30:20 crc kubenswrapper[4949]: I0216 11:30:20.971538 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s87ql\" (UniqueName: \"kubernetes.io/projected/af43164c-211d-477d-9e70-21f118ba045d-kube-api-access-s87ql\") pod \"af43164c-211d-477d-9e70-21f118ba045d\" (UID: \"af43164c-211d-477d-9e70-21f118ba045d\") "
Feb 16 11:30:20 crc kubenswrapper[4949]: I0216 11:30:20.971557 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af43164c-211d-477d-9e70-21f118ba045d-config\") pod \"af43164c-211d-477d-9e70-21f118ba045d\" (UID: \"af43164c-211d-477d-9e70-21f118ba045d\") "
Feb 16 11:30:20 crc kubenswrapper[4949]: I0216 11:30:20.979625 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/af43164c-211d-477d-9e70-21f118ba045d-kube-api-access-s87ql" (OuterVolumeSpecName: "kube-api-access-s87ql") pod "af43164c-211d-477d-9e70-21f118ba045d" (UID: "af43164c-211d-477d-9e70-21f118ba045d"). InnerVolumeSpecName "kube-api-access-s87ql". PluginName "kubernetes.io/projected", VolumeGidValue ""
Feb 16 11:30:21 crc kubenswrapper[4949]: I0216 11:30:21.011047 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/af43164c-211d-477d-9e70-21f118ba045d-config" (OuterVolumeSpecName: "config") pod "af43164c-211d-477d-9e70-21f118ba045d" (UID: "af43164c-211d-477d-9e70-21f118ba045d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Feb 16 11:30:21 crc kubenswrapper[4949]: I0216 11:30:21.013074 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/af43164c-211d-477d-9e70-21f118ba045d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "af43164c-211d-477d-9e70-21f118ba045d" (UID: "af43164c-211d-477d-9e70-21f118ba045d"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Feb 16 11:30:21 crc kubenswrapper[4949]: I0216 11:30:21.016795 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/af43164c-211d-477d-9e70-21f118ba045d-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "af43164c-211d-477d-9e70-21f118ba045d" (UID: "af43164c-211d-477d-9e70-21f118ba045d"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Feb 16 11:30:21 crc kubenswrapper[4949]: I0216 11:30:21.029972 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/af43164c-211d-477d-9e70-21f118ba045d-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "af43164c-211d-477d-9e70-21f118ba045d" (UID: "af43164c-211d-477d-9e70-21f118ba045d"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Feb 16 11:30:21 crc kubenswrapper[4949]: I0216 11:30:21.046560 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/af43164c-211d-477d-9e70-21f118ba045d-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "af43164c-211d-477d-9e70-21f118ba045d" (UID: "af43164c-211d-477d-9e70-21f118ba045d"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Feb 16 11:30:21 crc kubenswrapper[4949]: I0216 11:30:21.063044 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-t8dz8" event={"ID":"527d49b7-fb82-4e46-b608-839a2fce0f60","Type":"ContainerStarted","Data":"69b1b8aa41d1642ee1aceb6f196fc1457ca4b10b446dd6f7d8b3ba14678525d7"}
Feb 16 11:30:21 crc kubenswrapper[4949]: I0216 11:30:21.069482 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c59b957e-c5f8-463f-8228-1051225f5140","Type":"ContainerStarted","Data":"f49736c49e7ee0d3cf1d00ebe30072967923739f33b01ee94dcd5aecb04fe41d"}
Feb 16 11:30:21 crc kubenswrapper[4949]: I0216 11:30:21.072355 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"cbf839a7-7c2a-4595-b736-601d5559d548","Type":"ContainerStarted","Data":"15269389c418c4695ad72d57557c7d27324adba53595bc0982f53ea02098e1f0"}
Feb 16 11:30:21 crc kubenswrapper[4949]: I0216 11:30:21.079643 4949 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/af43164c-211d-477d-9e70-21f118ba045d-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:21 crc kubenswrapper[4949]: I0216 11:30:21.079706 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s87ql\" (UniqueName: \"kubernetes.io/projected/af43164c-211d-477d-9e70-21f118ba045d-kube-api-access-s87ql\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:21 crc kubenswrapper[4949]: I0216 11:30:21.079717 4949 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af43164c-211d-477d-9e70-21f118ba045d-config\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:21 crc kubenswrapper[4949]: I0216 11:30:21.080348 4949 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/af43164c-211d-477d-9e70-21f118ba045d-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:21 crc kubenswrapper[4949]: I0216 11:30:21.080397 4949 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/af43164c-211d-477d-9e70-21f118ba045d-dns-svc\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:21 crc kubenswrapper[4949]: I0216 11:30:21.080408 4949 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/af43164c-211d-477d-9e70-21f118ba045d-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:21 crc kubenswrapper[4949]: I0216 11:30:21.083145 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-t8dz8" podStartSLOduration=4.083123365 podStartE2EDuration="4.083123365s" podCreationTimestamp="2026-02-16 11:30:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:30:21.077014121 +0000 UTC m=+1410.706348286" watchObservedRunningTime="2026-02-16 11:30:21.083123365 +0000 UTC m=+1410.712457530"
Feb 16 11:30:21 crc kubenswrapper[4949]: I0216 11:30:21.087526 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c5cc7c5ff-5j5hq"
Feb 16 11:30:21 crc kubenswrapper[4949]: I0216 11:30:21.088283 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c5cc7c5ff-5j5hq" event={"ID":"af43164c-211d-477d-9e70-21f118ba045d","Type":"ContainerDied","Data":"26c1790647935fb1bd74ad5099027a1e9017f795304ed943cecdb354df6369de"}
Feb 16 11:30:21 crc kubenswrapper[4949]: I0216 11:30:21.088375 4949 scope.go:117] "RemoveContainer" containerID="95cf3d8040c5fed76fd64ee40a537e9f9c7e96c21be2a7645f2c5b999919d64e"
Feb 16 11:30:21 crc kubenswrapper[4949]: I0216 11:30:21.095906 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"33f5ed17-eb47-40e9-a499-b1604532f38e","Type":"ContainerStarted","Data":"37145a37f2682cec75d618b43912fb7885ae68a4d7664e3151876570904ee3f6"}
Feb 16 11:30:21 crc kubenswrapper[4949]: I0216 11:30:21.098761 4949 generic.go:334] "Generic (PLEG): container finished" podID="7eac8d6e-e079-41ea-83cb-d4e3a553db13" containerID="e9fa1d9703742054003fda04f2977335dba3141aee9a450853223e312c93dbeb" exitCode=0
Feb 16 11:30:21 crc kubenswrapper[4949]: I0216 11:30:21.098840 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b5c85b87-vhjrk" event={"ID":"7eac8d6e-e079-41ea-83cb-d4e3a553db13","Type":"ContainerDied","Data":"e9fa1d9703742054003fda04f2977335dba3141aee9a450853223e312c93dbeb"}
Feb 16 11:30:21 crc kubenswrapper[4949]: I0216 11:30:21.220480 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c5cc7c5ff-5j5hq"]
Feb 16 11:30:21 crc kubenswrapper[4949]: I0216 11:30:21.312406 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5c5cc7c5ff-5j5hq"]
Feb 16 11:30:21 crc kubenswrapper[4949]: E0216 11:30:21.654064 4949 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaf43164c_211d_477d_9e70_21f118ba045d.slice/crio-26c1790647935fb1bd74ad5099027a1e9017f795304ed943cecdb354df6369de\": RecentStats: unable to find data in memory cache]"
Feb 16 11:30:22 crc kubenswrapper[4949]: I0216 11:30:22.131313 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b5c85b87-vhjrk" event={"ID":"7eac8d6e-e079-41ea-83cb-d4e3a553db13","Type":"ContainerStarted","Data":"4755f11655c6804f93135f9e28d881f0998c90db8faf1f8da415f625b1fe096c"}
Feb 16 11:30:22 crc kubenswrapper[4949]: I0216 11:30:22.131797 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-8b5c85b87-vhjrk"
Feb 16 11:30:22 crc kubenswrapper[4949]: I0216 11:30:22.156628 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-8b5c85b87-vhjrk" podStartSLOduration=5.156600265 podStartE2EDuration="5.156600265s" podCreationTimestamp="2026-02-16 11:30:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:30:22.148770321 +0000 UTC m=+1411.778104506" watchObservedRunningTime="2026-02-16 11:30:22.156600265 +0000 UTC m=+1411.785934440"
Feb 16 11:30:23 crc kubenswrapper[4949]: I0216 11:30:23.150918 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"cbf839a7-7c2a-4595-b736-601d5559d548","Type":"ContainerStarted","Data":"cd91806a364c21d1d224bb7a95f370a062b1f7d79513ffd02bfdd697b2db567e"}
Feb 16 11:30:23 crc kubenswrapper[4949]: I0216 11:30:23.154700 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"33f5ed17-eb47-40e9-a499-b1604532f38e","Type":"ContainerStarted","Data":"a861c7291314c3ccce2249281f01ad0fa0fb456adfd8af36e9e1270d33ebe980"}
Feb 16 11:30:23 crc kubenswrapper[4949]: I0216 11:30:23.271360 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="af43164c-211d-477d-9e70-21f118ba045d" path="/var/lib/kubelet/pods/af43164c-211d-477d-9e70-21f118ba045d/volumes"
Feb 16 11:30:24 crc kubenswrapper[4949]: I0216 11:30:24.191796 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0"
Feb 16 11:30:24 crc kubenswrapper[4949]: I0216 11:30:24.202827 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0"
Feb 16 11:30:25 crc kubenswrapper[4949]: I0216 11:30:25.197424 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"cbf839a7-7c2a-4595-b736-601d5559d548","Type":"ContainerStarted","Data":"47f1a29dc46143814a6286234e23efab431233de3e88a2d1625a47514beafef9"}
Feb 16 11:30:25 crc kubenswrapper[4949]: I0216 11:30:25.197638 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="cbf839a7-7c2a-4595-b736-601d5559d548" containerName="glance-log" containerID="cri-o://cd91806a364c21d1d224bb7a95f370a062b1f7d79513ffd02bfdd697b2db567e" gracePeriod=30
Feb 16 11:30:25 crc kubenswrapper[4949]: I0216 11:30:25.197677 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="cbf839a7-7c2a-4595-b736-601d5559d548" containerName="glance-httpd" containerID="cri-o://47f1a29dc46143814a6286234e23efab431233de3e88a2d1625a47514beafef9" gracePeriod=30
Feb 16 11:30:25 crc kubenswrapper[4949]: I0216 11:30:25.202538 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"33f5ed17-eb47-40e9-a499-b1604532f38e","Type":"ContainerStarted","Data":"096a3feef6aa8c035ad8397f106a01d25e81301f345fa43c7141180d39844ad5"}
Feb 16 11:30:25 crc kubenswrapper[4949]: I0216 11:30:25.203335 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="33f5ed17-eb47-40e9-a499-b1604532f38e" containerName="glance-log" containerID="cri-o://a861c7291314c3ccce2249281f01ad0fa0fb456adfd8af36e9e1270d33ebe980" gracePeriod=30
Feb 16 11:30:25 crc kubenswrapper[4949]: I0216 11:30:25.203495 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="33f5ed17-eb47-40e9-a499-b1604532f38e" containerName="glance-httpd" containerID="cri-o://096a3feef6aa8c035ad8397f106a01d25e81301f345fa43c7141180d39844ad5" gracePeriod=30
Feb 16 11:30:25 crc kubenswrapper[4949]: I0216 11:30:25.208733 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0"
Feb 16 11:30:25 crc kubenswrapper[4949]: I0216 11:30:25.253779 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=8.253738803 podStartE2EDuration="8.253738803s" podCreationTimestamp="2026-02-16 11:30:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:30:25.231328233 +0000 UTC m=+1414.860662418" watchObservedRunningTime="2026-02-16 11:30:25.253738803 +0000 UTC m=+1414.883072968"
Feb 16 11:30:25 crc kubenswrapper[4949]: I0216 11:30:25.276559 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=8.275284298 podStartE2EDuration="8.275284298s" podCreationTimestamp="2026-02-16 11:30:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:30:25.260121475 +0000 UTC m=+1414.889455640" watchObservedRunningTime="2026-02-16 11:30:25.275284298 +0000 UTC m=+1414.904618463"
Feb 16 11:30:26 crc kubenswrapper[4949]: I0216 11:30:26.223947 4949 generic.go:334] "Generic (PLEG): container finished" podID="cbf839a7-7c2a-4595-b736-601d5559d548" containerID="47f1a29dc46143814a6286234e23efab431233de3e88a2d1625a47514beafef9" exitCode=0
Feb 16 11:30:26 crc kubenswrapper[4949]: I0216 11:30:26.224299 4949 generic.go:334] "Generic (PLEG): container finished" podID="cbf839a7-7c2a-4595-b736-601d5559d548" containerID="cd91806a364c21d1d224bb7a95f370a062b1f7d79513ffd02bfdd697b2db567e" exitCode=143
Feb 16 11:30:26 crc kubenswrapper[4949]: I0216 11:30:26.224050 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"cbf839a7-7c2a-4595-b736-601d5559d548","Type":"ContainerDied","Data":"47f1a29dc46143814a6286234e23efab431233de3e88a2d1625a47514beafef9"}
Feb 16 11:30:26 crc kubenswrapper[4949]: I0216 11:30:26.224397 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"cbf839a7-7c2a-4595-b736-601d5559d548","Type":"ContainerDied","Data":"cd91806a364c21d1d224bb7a95f370a062b1f7d79513ffd02bfdd697b2db567e"}
Feb 16 11:30:26 crc kubenswrapper[4949]: I0216 11:30:26.229237 4949 generic.go:334] "Generic (PLEG): container finished" podID="33f5ed17-eb47-40e9-a499-b1604532f38e" containerID="096a3feef6aa8c035ad8397f106a01d25e81301f345fa43c7141180d39844ad5" exitCode=0
Feb 16 11:30:26 crc kubenswrapper[4949]: I0216 11:30:26.229268 4949 generic.go:334] "Generic (PLEG): container finished" podID="33f5ed17-eb47-40e9-a499-b1604532f38e" containerID="a861c7291314c3ccce2249281f01ad0fa0fb456adfd8af36e9e1270d33ebe980" exitCode=143
Feb 16 11:30:26 crc kubenswrapper[4949]: I0216 11:30:26.229332 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"33f5ed17-eb47-40e9-a499-b1604532f38e","Type":"ContainerDied","Data":"096a3feef6aa8c035ad8397f106a01d25e81301f345fa43c7141180d39844ad5"}
Feb 16 11:30:26 crc kubenswrapper[4949]: I0216 11:30:26.229402 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"33f5ed17-eb47-40e9-a499-b1604532f38e","Type":"ContainerDied","Data":"a861c7291314c3ccce2249281f01ad0fa0fb456adfd8af36e9e1270d33ebe980"}
Feb 16 11:30:26 crc kubenswrapper[4949]: I0216 11:30:26.232088 4949 generic.go:334] "Generic (PLEG): container finished" podID="13322d8f-5df8-488f-bead-b6b580b0b424" containerID="d67186fb11e74fd25242f5bd451feeedee251d7cce9bf89a9a7455d2d03bd3c5" exitCode=0
Feb 16 11:30:26 crc kubenswrapper[4949]: I0216 11:30:26.232154 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-gftq4" event={"ID":"13322d8f-5df8-488f-bead-b6b580b0b424","Type":"ContainerDied","Data":"d67186fb11e74fd25242f5bd451feeedee251d7cce9bf89a9a7455d2d03bd3c5"}
Feb 16 11:30:28 crc kubenswrapper[4949]: I0216 11:30:28.149464 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-8b5c85b87-vhjrk"
Feb 16 11:30:28 crc kubenswrapper[4949]: I0216 11:30:28.226657 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7ff5475cc9-wl8jh"]
Feb 16 11:30:28 crc kubenswrapper[4949]: I0216 11:30:28.226989 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7ff5475cc9-wl8jh" podUID="6c60c38e-2850-474f-85f8-2bd436299ebe" containerName="dnsmasq-dns" containerID="cri-o://c7fc92c9e2443a29efc50b9daa8d763effbd9224e5e5382c017f887381e6a1e2" gracePeriod=10
Feb 16 11:30:29 crc kubenswrapper[4949]: I0216 11:30:29.313207 4949 generic.go:334] "Generic (PLEG): container finished" podID="6c60c38e-2850-474f-85f8-2bd436299ebe" containerID="c7fc92c9e2443a29efc50b9daa8d763effbd9224e5e5382c017f887381e6a1e2" exitCode=0
Feb 16 11:30:29 crc kubenswrapper[4949]: I0216 11:30:29.313288 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7ff5475cc9-wl8jh" event={"ID":"6c60c38e-2850-474f-85f8-2bd436299ebe","Type":"ContainerDied","Data":"c7fc92c9e2443a29efc50b9daa8d763effbd9224e5e5382c017f887381e6a1e2"}
Feb 16 11:30:30 crc kubenswrapper[4949]: I0216 11:30:30.048011 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-7ff5475cc9-wl8jh" podUID="6c60c38e-2850-474f-85f8-2bd436299ebe" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.167:5353: connect: connection refused"
Feb 16 11:30:31 crc kubenswrapper[4949]: I0216 11:30:31.412954 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-g76dr"]
Feb 16 11:30:31 crc kubenswrapper[4949]: E0216 11:30:31.414069 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af43164c-211d-477d-9e70-21f118ba045d" containerName="init"
Feb 16 11:30:31 crc kubenswrapper[4949]: I0216 11:30:31.414088 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="af43164c-211d-477d-9e70-21f118ba045d" containerName="init"
Feb 16 11:30:31 crc kubenswrapper[4949]: I0216 11:30:31.414328 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="af43164c-211d-477d-9e70-21f118ba045d" containerName="init"
Feb 16 11:30:31 crc kubenswrapper[4949]: I0216 11:30:31.416478 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-g76dr"
Feb 16 11:30:31 crc kubenswrapper[4949]: I0216 11:30:31.428977 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-g76dr"]
Feb 16 11:30:31 crc kubenswrapper[4949]: I0216 11:30:31.514410 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-gftq4"
Feb 16 11:30:31 crc kubenswrapper[4949]: I0216 11:30:31.532661 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/acc263a1-4f57-4dca-bcc5-5d5388539a5d-catalog-content\") pod \"redhat-operators-g76dr\" (UID: \"acc263a1-4f57-4dca-bcc5-5d5388539a5d\") " pod="openshift-marketplace/redhat-operators-g76dr"
Feb 16 11:30:31 crc kubenswrapper[4949]: I0216 11:30:31.532758 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-49lzw\" (UniqueName: \"kubernetes.io/projected/acc263a1-4f57-4dca-bcc5-5d5388539a5d-kube-api-access-49lzw\") pod \"redhat-operators-g76dr\" (UID: \"acc263a1-4f57-4dca-bcc5-5d5388539a5d\") " pod="openshift-marketplace/redhat-operators-g76dr"
Feb 16 11:30:31 crc kubenswrapper[4949]: I0216 11:30:31.533083 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/acc263a1-4f57-4dca-bcc5-5d5388539a5d-utilities\") pod \"redhat-operators-g76dr\" (UID: \"acc263a1-4f57-4dca-bcc5-5d5388539a5d\") " pod="openshift-marketplace/redhat-operators-g76dr"
Feb 16 11:30:31 crc kubenswrapper[4949]: I0216 11:30:31.635488 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/13322d8f-5df8-488f-bead-b6b580b0b424-config-data\") pod \"13322d8f-5df8-488f-bead-b6b580b0b424\" (UID: \"13322d8f-5df8-488f-bead-b6b580b0b424\") "
Feb 16 11:30:31 crc kubenswrapper[4949]: I0216 11:30:31.635580 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cmlqs\" (UniqueName: \"kubernetes.io/projected/13322d8f-5df8-488f-bead-b6b580b0b424-kube-api-access-cmlqs\") pod \"13322d8f-5df8-488f-bead-b6b580b0b424\" (UID: \"13322d8f-5df8-488f-bead-b6b580b0b424\") "
Feb 16 11:30:31 crc kubenswrapper[4949]: I0216 11:30:31.635734 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/13322d8f-5df8-488f-bead-b6b580b0b424-scripts\") pod \"13322d8f-5df8-488f-bead-b6b580b0b424\" (UID: \"13322d8f-5df8-488f-bead-b6b580b0b424\") "
Feb 16 11:30:31 crc kubenswrapper[4949]: I0216 11:30:31.635843 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/13322d8f-5df8-488f-bead-b6b580b0b424-fernet-keys\") pod \"13322d8f-5df8-488f-bead-b6b580b0b424\" (UID: \"13322d8f-5df8-488f-bead-b6b580b0b424\") "
Feb 16 11:30:31 crc kubenswrapper[4949]: I0216 11:30:31.635871 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13322d8f-5df8-488f-bead-b6b580b0b424-combined-ca-bundle\") pod \"13322d8f-5df8-488f-bead-b6b580b0b424\" (UID: \"13322d8f-5df8-488f-bead-b6b580b0b424\") "
Feb 16 11:30:31 crc kubenswrapper[4949]: I0216 11:30:31.635967 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/13322d8f-5df8-488f-bead-b6b580b0b424-credential-keys\") pod \"13322d8f-5df8-488f-bead-b6b580b0b424\" (UID: \"13322d8f-5df8-488f-bead-b6b580b0b424\") "
Feb 16 11:30:31 crc kubenswrapper[4949]: I0216 11:30:31.636450 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-49lzw\" (UniqueName: \"kubernetes.io/projected/acc263a1-4f57-4dca-bcc5-5d5388539a5d-kube-api-access-49lzw\") pod \"redhat-operators-g76dr\" (UID: \"acc263a1-4f57-4dca-bcc5-5d5388539a5d\") " pod="openshift-marketplace/redhat-operators-g76dr"
Feb 16 11:30:31 crc kubenswrapper[4949]: I0216 11:30:31.636653 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/acc263a1-4f57-4dca-bcc5-5d5388539a5d-utilities\") pod \"redhat-operators-g76dr\" (UID: \"acc263a1-4f57-4dca-bcc5-5d5388539a5d\") " pod="openshift-marketplace/redhat-operators-g76dr"
Feb 16 11:30:31 crc kubenswrapper[4949]: I0216 11:30:31.636797 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/acc263a1-4f57-4dca-bcc5-5d5388539a5d-catalog-content\") pod \"redhat-operators-g76dr\" (UID: \"acc263a1-4f57-4dca-bcc5-5d5388539a5d\") " pod="openshift-marketplace/redhat-operators-g76dr"
Feb 16 11:30:31 crc kubenswrapper[4949]: I0216 11:30:31.637468 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/acc263a1-4f57-4dca-bcc5-5d5388539a5d-catalog-content\") pod \"redhat-operators-g76dr\" (UID: \"acc263a1-4f57-4dca-bcc5-5d5388539a5d\") " pod="openshift-marketplace/redhat-operators-g76dr"
Feb 16 11:30:31 crc kubenswrapper[4949]: I0216 11:30:31.637604 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/acc263a1-4f57-4dca-bcc5-5d5388539a5d-utilities\") pod \"redhat-operators-g76dr\" (UID: \"acc263a1-4f57-4dca-bcc5-5d5388539a5d\") " pod="openshift-marketplace/redhat-operators-g76dr"
Feb 16 11:30:31 crc kubenswrapper[4949]: I0216 11:30:31.642874 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13322d8f-5df8-488f-bead-b6b580b0b424-scripts" (OuterVolumeSpecName: "scripts") pod "13322d8f-5df8-488f-bead-b6b580b0b424" (UID: "13322d8f-5df8-488f-bead-b6b580b0b424"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Feb 16 11:30:31 crc kubenswrapper[4949]: I0216 11:30:31.644127 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13322d8f-5df8-488f-bead-b6b580b0b424-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "13322d8f-5df8-488f-bead-b6b580b0b424" (UID: "13322d8f-5df8-488f-bead-b6b580b0b424"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Feb 16 11:30:31 crc kubenswrapper[4949]: I0216 11:30:31.644251 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/13322d8f-5df8-488f-bead-b6b580b0b424-kube-api-access-cmlqs" (OuterVolumeSpecName: "kube-api-access-cmlqs") pod "13322d8f-5df8-488f-bead-b6b580b0b424" (UID: "13322d8f-5df8-488f-bead-b6b580b0b424"). InnerVolumeSpecName "kube-api-access-cmlqs". PluginName "kubernetes.io/projected", VolumeGidValue ""
Feb 16 11:30:31 crc kubenswrapper[4949]: I0216 11:30:31.646876 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13322d8f-5df8-488f-bead-b6b580b0b424-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "13322d8f-5df8-488f-bead-b6b580b0b424" (UID: "13322d8f-5df8-488f-bead-b6b580b0b424"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Feb 16 11:30:31 crc kubenswrapper[4949]: I0216 11:30:31.658011 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-49lzw\" (UniqueName: \"kubernetes.io/projected/acc263a1-4f57-4dca-bcc5-5d5388539a5d-kube-api-access-49lzw\") pod \"redhat-operators-g76dr\" (UID: \"acc263a1-4f57-4dca-bcc5-5d5388539a5d\") " pod="openshift-marketplace/redhat-operators-g76dr"
Feb 16 11:30:31 crc kubenswrapper[4949]: I0216 11:30:31.677431 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13322d8f-5df8-488f-bead-b6b580b0b424-config-data" (OuterVolumeSpecName: "config-data") pod "13322d8f-5df8-488f-bead-b6b580b0b424" (UID: "13322d8f-5df8-488f-bead-b6b580b0b424"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Feb 16 11:30:31 crc kubenswrapper[4949]: I0216 11:30:31.715407 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13322d8f-5df8-488f-bead-b6b580b0b424-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "13322d8f-5df8-488f-bead-b6b580b0b424" (UID: "13322d8f-5df8-488f-bead-b6b580b0b424"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Feb 16 11:30:31 crc kubenswrapper[4949]: I0216 11:30:31.739103 4949 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/13322d8f-5df8-488f-bead-b6b580b0b424-credential-keys\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:31 crc kubenswrapper[4949]: I0216 11:30:31.739138 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/13322d8f-5df8-488f-bead-b6b580b0b424-config-data\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:31 crc kubenswrapper[4949]: I0216 11:30:31.739151 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cmlqs\" (UniqueName: \"kubernetes.io/projected/13322d8f-5df8-488f-bead-b6b580b0b424-kube-api-access-cmlqs\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:31 crc kubenswrapper[4949]: I0216 11:30:31.739192 4949 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/13322d8f-5df8-488f-bead-b6b580b0b424-scripts\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:31 crc kubenswrapper[4949]: I0216 11:30:31.739204 4949 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/13322d8f-5df8-488f-bead-b6b580b0b424-fernet-keys\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:31 crc kubenswrapper[4949]: I0216 11:30:31.739214 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13322d8f-5df8-488f-bead-b6b580b0b424-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:31 crc kubenswrapper[4949]: I0216 11:30:31.831597 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-g76dr"
Feb 16 11:30:32 crc kubenswrapper[4949]: I0216 11:30:32.374795 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-gftq4" event={"ID":"13322d8f-5df8-488f-bead-b6b580b0b424","Type":"ContainerDied","Data":"b0289a49d80bb4c1110c91dac47bf6381e65404d7289d8a8ae0288327798abaf"}
Feb 16 11:30:32 crc kubenswrapper[4949]: I0216 11:30:32.375209 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b0289a49d80bb4c1110c91dac47bf6381e65404d7289d8a8ae0288327798abaf"
Feb 16 11:30:32 crc kubenswrapper[4949]: I0216 11:30:32.375110 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-gftq4"
Feb 16 11:30:32 crc kubenswrapper[4949]: I0216 11:30:32.627411 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-gftq4"]
Feb 16 11:30:32 crc kubenswrapper[4949]: I0216 11:30:32.646271 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-gftq4"]
Feb 16 11:30:32 crc kubenswrapper[4949]: I0216 11:30:32.711993 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-p69nz"]
Feb 16 11:30:32 crc kubenswrapper[4949]: E0216 11:30:32.712626 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13322d8f-5df8-488f-bead-b6b580b0b424" containerName="keystone-bootstrap"
Feb 16 11:30:32 crc kubenswrapper[4949]: I0216 11:30:32.712654 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="13322d8f-5df8-488f-bead-b6b580b0b424" containerName="keystone-bootstrap"
Feb 16 11:30:32 crc kubenswrapper[4949]: I0216 11:30:32.712935 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="13322d8f-5df8-488f-bead-b6b580b0b424" containerName="keystone-bootstrap"
Feb 16 11:30:32 crc kubenswrapper[4949]: I0216 11:30:32.714096 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-p69nz"
Feb 16 11:30:32 crc kubenswrapper[4949]: I0216 11:30:32.720529 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret"
Feb 16 11:30:32 crc kubenswrapper[4949]: I0216 11:30:32.720648 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data"
Feb 16 11:30:32 crc kubenswrapper[4949]: I0216 11:30:32.720691 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-h4zml"
Feb 16 11:30:32 crc kubenswrapper[4949]: I0216 11:30:32.720966 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts"
Feb 16 11:30:32 crc kubenswrapper[4949]: I0216 11:30:32.721474 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone"
Feb 16 11:30:32 crc kubenswrapper[4949]: I0216 11:30:32.727589 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-p69nz"]
Feb 16 11:30:32 crc kubenswrapper[4949]: I0216 11:30:32.867104 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/89c4baaa-f449-4fab-a513-1eec4a163af9-fernet-keys\") pod \"keystone-bootstrap-p69nz\" (UID: \"89c4baaa-f449-4fab-a513-1eec4a163af9\") " pod="openstack/keystone-bootstrap-p69nz"
Feb 16 11:30:32 crc kubenswrapper[4949]: I0216 11:30:32.867546 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/89c4baaa-f449-4fab-a513-1eec4a163af9-credential-keys\") pod \"keystone-bootstrap-p69nz\" (UID: \"89c4baaa-f449-4fab-a513-1eec4a163af9\") " pod="openstack/keystone-bootstrap-p69nz"
Feb 16 11:30:32 crc kubenswrapper[4949]: I0216 11:30:32.867684 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lz4sf\" (UniqueName: \"kubernetes.io/projected/89c4baaa-f449-4fab-a513-1eec4a163af9-kube-api-access-lz4sf\") pod \"keystone-bootstrap-p69nz\" (UID: \"89c4baaa-f449-4fab-a513-1eec4a163af9\") " pod="openstack/keystone-bootstrap-p69nz"
Feb 16 11:30:32 crc kubenswrapper[4949]: I0216 11:30:32.867735 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89c4baaa-f449-4fab-a513-1eec4a163af9-combined-ca-bundle\") pod \"keystone-bootstrap-p69nz\" (UID: \"89c4baaa-f449-4fab-a513-1eec4a163af9\") " pod="openstack/keystone-bootstrap-p69nz"
Feb 16 11:30:32 crc kubenswrapper[4949]: I0216 11:30:32.867788 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89c4baaa-f449-4fab-a513-1eec4a163af9-config-data\") pod \"keystone-bootstrap-p69nz\" (UID: \"89c4baaa-f449-4fab-a513-1eec4a163af9\") " pod="openstack/keystone-bootstrap-p69nz"
Feb 16 11:30:32 crc kubenswrapper[4949]: I0216 11:30:32.867978 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/89c4baaa-f449-4fab-a513-1eec4a163af9-scripts\") pod \"keystone-bootstrap-p69nz\" (UID: \"89c4baaa-f449-4fab-a513-1eec4a163af9\") " pod="openstack/keystone-bootstrap-p69nz"
Feb 16 11:30:32 crc kubenswrapper[4949]: I0216 11:30:32.969975 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/89c4baaa-f449-4fab-a513-1eec4a163af9-scripts\") pod \"keystone-bootstrap-p69nz\" (UID: \"89c4baaa-f449-4fab-a513-1eec4a163af9\") " pod="openstack/keystone-bootstrap-p69nz"
Feb 16 11:30:32 crc kubenswrapper[4949]: I0216 11:30:32.970061 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/89c4baaa-f449-4fab-a513-1eec4a163af9-fernet-keys\") pod \"keystone-bootstrap-p69nz\" (UID: \"89c4baaa-f449-4fab-a513-1eec4a163af9\") " pod="openstack/keystone-bootstrap-p69nz"
Feb 16 11:30:32 crc kubenswrapper[4949]: I0216 11:30:32.970108 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/89c4baaa-f449-4fab-a513-1eec4a163af9-credential-keys\") pod \"keystone-bootstrap-p69nz\" (UID: \"89c4baaa-f449-4fab-a513-1eec4a163af9\") " pod="openstack/keystone-bootstrap-p69nz"
Feb 16 11:30:32 crc kubenswrapper[4949]: I0216 11:30:32.970259 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lz4sf\" (UniqueName: \"kubernetes.io/projected/89c4baaa-f449-4fab-a513-1eec4a163af9-kube-api-access-lz4sf\") pod \"keystone-bootstrap-p69nz\" (UID: \"89c4baaa-f449-4fab-a513-1eec4a163af9\") " pod="openstack/keystone-bootstrap-p69nz"
Feb 16 11:30:32 crc kubenswrapper[4949]: I0216 11:30:32.970301 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89c4baaa-f449-4fab-a513-1eec4a163af9-combined-ca-bundle\") pod \"keystone-bootstrap-p69nz\" (UID: \"89c4baaa-f449-4fab-a513-1eec4a163af9\") " pod="openstack/keystone-bootstrap-p69nz"
Feb 16 11:30:32 crc kubenswrapper[4949]: I0216 11:30:32.970345 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89c4baaa-f449-4fab-a513-1eec4a163af9-config-data\") pod \"keystone-bootstrap-p69nz\" (UID: \"89c4baaa-f449-4fab-a513-1eec4a163af9\") " pod="openstack/keystone-bootstrap-p69nz"
Feb 16 11:30:32 crc kubenswrapper[4949]: I0216 11:30:32.977120 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/89c4baaa-f449-4fab-a513-1eec4a163af9-scripts\") pod \"keystone-bootstrap-p69nz\" (UID: \"89c4baaa-f449-4fab-a513-1eec4a163af9\") " pod="openstack/keystone-bootstrap-p69nz"
Feb 16 11:30:32 crc kubenswrapper[4949]: I0216 11:30:32.977259 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89c4baaa-f449-4fab-a513-1eec4a163af9-config-data\") pod \"keystone-bootstrap-p69nz\" (UID: \"89c4baaa-f449-4fab-a513-1eec4a163af9\") " pod="openstack/keystone-bootstrap-p69nz"
Feb 16 11:30:32 crc kubenswrapper[4949]: I0216 11:30:32.977346 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89c4baaa-f449-4fab-a513-1eec4a163af9-combined-ca-bundle\") pod \"keystone-bootstrap-p69nz\" (UID: \"89c4baaa-f449-4fab-a513-1eec4a163af9\") " pod="openstack/keystone-bootstrap-p69nz"
Feb 16 11:30:32 crc kubenswrapper[4949]: I0216 11:30:32.977768 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/89c4baaa-f449-4fab-a513-1eec4a163af9-fernet-keys\") pod \"keystone-bootstrap-p69nz\" (UID: \"89c4baaa-f449-4fab-a513-1eec4a163af9\") " pod="openstack/keystone-bootstrap-p69nz"
Feb 16 11:30:32 crc kubenswrapper[4949]: I0216 11:30:32.988723 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/89c4baaa-f449-4fab-a513-1eec4a163af9-credential-keys\") pod \"keystone-bootstrap-p69nz\" (UID: \"89c4baaa-f449-4fab-a513-1eec4a163af9\") " pod="openstack/keystone-bootstrap-p69nz"
Feb 16 11:30:32 crc kubenswrapper[4949]: I0216 11:30:32.994961 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lz4sf\" (UniqueName: \"kubernetes.io/projected/89c4baaa-f449-4fab-a513-1eec4a163af9-kube-api-access-lz4sf\") pod \"keystone-bootstrap-p69nz\" (UID: \"89c4baaa-f449-4fab-a513-1eec4a163af9\") " pod="openstack/keystone-bootstrap-p69nz"
Feb 16 11:30:33 crc kubenswrapper[4949]: I0216 11:30:33.034398 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-p69nz"
Feb 16 11:30:33 crc kubenswrapper[4949]: I0216 11:30:33.269427 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="13322d8f-5df8-488f-bead-b6b580b0b424" path="/var/lib/kubelet/pods/13322d8f-5df8-488f-bead-b6b580b0b424/volumes"
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.207131 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.215302 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.303215 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33f5ed17-eb47-40e9-a499-b1604532f38e-config-data\") pod \"33f5ed17-eb47-40e9-a499-b1604532f38e\" (UID: \"33f5ed17-eb47-40e9-a499-b1604532f38e\") "
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.303457 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-27105715-b37b-455e-aa04-f095a035218a\") pod \"33f5ed17-eb47-40e9-a499-b1604532f38e\" (UID: \"33f5ed17-eb47-40e9-a499-b1604532f38e\") "
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.303522 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/cbf839a7-7c2a-4595-b736-601d5559d548-httpd-run\") pod \"cbf839a7-7c2a-4595-b736-601d5559d548\" (UID: \"cbf839a7-7c2a-4595-b736-601d5559d548\") "
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.303559 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qln5r\" (UniqueName: \"kubernetes.io/projected/cbf839a7-7c2a-4595-b736-601d5559d548-kube-api-access-qln5r\") pod \"cbf839a7-7c2a-4595-b736-601d5559d548\" (UID: \"cbf839a7-7c2a-4595-b736-601d5559d548\") "
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.303730 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c8fa8abd-35cd-473b-b870-e4bd2cd177cf\") pod \"cbf839a7-7c2a-4595-b736-601d5559d548\" (UID: \"cbf839a7-7c2a-4595-b736-601d5559d548\") "
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.303764 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/33f5ed17-eb47-40e9-a499-b1604532f38e-logs\") pod \"33f5ed17-eb47-40e9-a499-b1604532f38e\" (UID: \"33f5ed17-eb47-40e9-a499-b1604532f38e\") "
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.303806 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cbf839a7-7c2a-4595-b736-601d5559d548-logs\") pod \"cbf839a7-7c2a-4595-b736-601d5559d548\" (UID: \"cbf839a7-7c2a-4595-b736-601d5559d548\") "
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.303833 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cbf839a7-7c2a-4595-b736-601d5559d548-public-tls-certs\") pod \"cbf839a7-7c2a-4595-b736-601d5559d548\" (UID: \"cbf839a7-7c2a-4595-b736-601d5559d548\") "
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.303872 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33f5ed17-eb47-40e9-a499-b1604532f38e-combined-ca-bundle\") pod \"33f5ed17-eb47-40e9-a499-b1604532f38e\" (UID: \"33f5ed17-eb47-40e9-a499-b1604532f38e\") "
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.303936 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/33f5ed17-eb47-40e9-a499-b1604532f38e-scripts\") pod \"33f5ed17-eb47-40e9-a499-b1604532f38e\" (UID: \"33f5ed17-eb47-40e9-a499-b1604532f38e\") "
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.303962 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lfj5n\" (UniqueName: \"kubernetes.io/projected/33f5ed17-eb47-40e9-a499-b1604532f38e-kube-api-access-lfj5n\") pod \"33f5ed17-eb47-40e9-a499-b1604532f38e\" (UID: \"33f5ed17-eb47-40e9-a499-b1604532f38e\") "
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.304015 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/33f5ed17-eb47-40e9-a499-b1604532f38e-httpd-run\") pod \"33f5ed17-eb47-40e9-a499-b1604532f38e\" (UID: \"33f5ed17-eb47-40e9-a499-b1604532f38e\") "
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.304270 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cbf839a7-7c2a-4595-b736-601d5559d548-scripts\") pod \"cbf839a7-7c2a-4595-b736-601d5559d548\" (UID: \"cbf839a7-7c2a-4595-b736-601d5559d548\") "
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.304249 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cbf839a7-7c2a-4595-b736-601d5559d548-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "cbf839a7-7c2a-4595-b736-601d5559d548" (UID: "cbf839a7-7c2a-4595-b736-601d5559d548"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.304327 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/33f5ed17-eb47-40e9-a499-b1604532f38e-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "33f5ed17-eb47-40e9-a499-b1604532f38e" (UID: "33f5ed17-eb47-40e9-a499-b1604532f38e"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.304332 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/33f5ed17-eb47-40e9-a499-b1604532f38e-internal-tls-certs\") pod \"33f5ed17-eb47-40e9-a499-b1604532f38e\" (UID: \"33f5ed17-eb47-40e9-a499-b1604532f38e\") "
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.304407 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cbf839a7-7c2a-4595-b736-601d5559d548-combined-ca-bundle\") pod \"cbf839a7-7c2a-4595-b736-601d5559d548\" (UID: \"cbf839a7-7c2a-4595-b736-601d5559d548\") "
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.304450 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cbf839a7-7c2a-4595-b736-601d5559d548-config-data\") pod \"cbf839a7-7c2a-4595-b736-601d5559d548\" (UID: \"cbf839a7-7c2a-4595-b736-601d5559d548\") "
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.304771 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cbf839a7-7c2a-4595-b736-601d5559d548-logs" (OuterVolumeSpecName: "logs") pod "cbf839a7-7c2a-4595-b736-601d5559d548" (UID: "cbf839a7-7c2a-4595-b736-601d5559d548"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.305302 4949 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cbf839a7-7c2a-4595-b736-601d5559d548-logs\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.305324 4949 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/33f5ed17-eb47-40e9-a499-b1604532f38e-httpd-run\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.305334 4949 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/cbf839a7-7c2a-4595-b736-601d5559d548-httpd-run\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.305320 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/33f5ed17-eb47-40e9-a499-b1604532f38e-logs" (OuterVolumeSpecName: "logs") pod "33f5ed17-eb47-40e9-a499-b1604532f38e" (UID: "33f5ed17-eb47-40e9-a499-b1604532f38e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.312628 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/33f5ed17-eb47-40e9-a499-b1604532f38e-scripts" (OuterVolumeSpecName: "scripts") pod "33f5ed17-eb47-40e9-a499-b1604532f38e" (UID: "33f5ed17-eb47-40e9-a499-b1604532f38e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.312676 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/33f5ed17-eb47-40e9-a499-b1604532f38e-kube-api-access-lfj5n" (OuterVolumeSpecName: "kube-api-access-lfj5n") pod "33f5ed17-eb47-40e9-a499-b1604532f38e" (UID: "33f5ed17-eb47-40e9-a499-b1604532f38e"). InnerVolumeSpecName "kube-api-access-lfj5n". PluginName "kubernetes.io/projected", VolumeGidValue ""
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.316913 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cbf839a7-7c2a-4595-b736-601d5559d548-kube-api-access-qln5r" (OuterVolumeSpecName: "kube-api-access-qln5r") pod "cbf839a7-7c2a-4595-b736-601d5559d548" (UID: "cbf839a7-7c2a-4595-b736-601d5559d548"). InnerVolumeSpecName "kube-api-access-qln5r". PluginName "kubernetes.io/projected", VolumeGidValue ""
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.326066 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cbf839a7-7c2a-4595-b736-601d5559d548-scripts" (OuterVolumeSpecName: "scripts") pod "cbf839a7-7c2a-4595-b736-601d5559d548" (UID: "cbf839a7-7c2a-4595-b736-601d5559d548"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.339225 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-27105715-b37b-455e-aa04-f095a035218a" (OuterVolumeSpecName: "glance") pod "33f5ed17-eb47-40e9-a499-b1604532f38e" (UID: "33f5ed17-eb47-40e9-a499-b1604532f38e"). InnerVolumeSpecName "pvc-27105715-b37b-455e-aa04-f095a035218a". PluginName "kubernetes.io/csi", VolumeGidValue ""
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.346989 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c8fa8abd-35cd-473b-b870-e4bd2cd177cf" (OuterVolumeSpecName: "glance") pod "cbf839a7-7c2a-4595-b736-601d5559d548" (UID: "cbf839a7-7c2a-4595-b736-601d5559d548"). InnerVolumeSpecName "pvc-c8fa8abd-35cd-473b-b870-e4bd2cd177cf". PluginName "kubernetes.io/csi", VolumeGidValue ""
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.373392 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/33f5ed17-eb47-40e9-a499-b1604532f38e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "33f5ed17-eb47-40e9-a499-b1604532f38e" (UID: "33f5ed17-eb47-40e9-a499-b1604532f38e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.407180 4949 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-27105715-b37b-455e-aa04-f095a035218a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-27105715-b37b-455e-aa04-f095a035218a\") on node \"crc\" "
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.407214 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qln5r\" (UniqueName: \"kubernetes.io/projected/cbf839a7-7c2a-4595-b736-601d5559d548-kube-api-access-qln5r\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.407232 4949 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-c8fa8abd-35cd-473b-b870-e4bd2cd177cf\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c8fa8abd-35cd-473b-b870-e4bd2cd177cf\") on node \"crc\" "
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.407244 4949 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/33f5ed17-eb47-40e9-a499-b1604532f38e-logs\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.407266 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33f5ed17-eb47-40e9-a499-b1604532f38e-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.407279 4949 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/33f5ed17-eb47-40e9-a499-b1604532f38e-scripts\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.407288 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lfj5n\" (UniqueName: \"kubernetes.io/projected/33f5ed17-eb47-40e9-a499-b1604532f38e-kube-api-access-lfj5n\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.407297 4949 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cbf839a7-7c2a-4595-b736-601d5559d548-scripts\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.423084 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"cbf839a7-7c2a-4595-b736-601d5559d548","Type":"ContainerDied","Data":"15269389c418c4695ad72d57557c7d27324adba53595bc0982f53ea02098e1f0"}
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.423149 4949 scope.go:117] "RemoveContainer" containerID="47f1a29dc46143814a6286234e23efab431233de3e88a2d1625a47514beafef9"
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.423322 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.433030 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"33f5ed17-eb47-40e9-a499-b1604532f38e","Type":"ContainerDied","Data":"37145a37f2682cec75d618b43912fb7885ae68a4d7664e3151876570904ee3f6"}
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.433138 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.443725 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cbf839a7-7c2a-4595-b736-601d5559d548-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "cbf839a7-7c2a-4595-b736-601d5559d548" (UID: "cbf839a7-7c2a-4595-b736-601d5559d548"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.448275 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cbf839a7-7c2a-4595-b736-601d5559d548-config-data" (OuterVolumeSpecName: "config-data") pod "cbf839a7-7c2a-4595-b736-601d5559d548" (UID: "cbf839a7-7c2a-4595-b736-601d5559d548"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.451821 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cbf839a7-7c2a-4595-b736-601d5559d548-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cbf839a7-7c2a-4595-b736-601d5559d548" (UID: "cbf839a7-7c2a-4595-b736-601d5559d548"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.452061 4949 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice...
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.452244 4949 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-27105715-b37b-455e-aa04-f095a035218a" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-27105715-b37b-455e-aa04-f095a035218a") on node "crc"
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.460891 4949 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice...
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.461036 4949 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-c8fa8abd-35cd-473b-b870-e4bd2cd177cf" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c8fa8abd-35cd-473b-b870-e4bd2cd177cf") on node "crc"
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.478731 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/33f5ed17-eb47-40e9-a499-b1604532f38e-config-data" (OuterVolumeSpecName: "config-data") pod "33f5ed17-eb47-40e9-a499-b1604532f38e" (UID: "33f5ed17-eb47-40e9-a499-b1604532f38e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.502694 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/33f5ed17-eb47-40e9-a499-b1604532f38e-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "33f5ed17-eb47-40e9-a499-b1604532f38e" (UID: "33f5ed17-eb47-40e9-a499-b1604532f38e"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.510216 4949 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/33f5ed17-eb47-40e9-a499-b1604532f38e-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.510416 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cbf839a7-7c2a-4595-b736-601d5559d548-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.510436 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cbf839a7-7c2a-4595-b736-601d5559d548-config-data\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.510446 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33f5ed17-eb47-40e9-a499-b1604532f38e-config-data\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.510458 4949 reconciler_common.go:293] "Volume detached for volume \"pvc-27105715-b37b-455e-aa04-f095a035218a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-27105715-b37b-455e-aa04-f095a035218a\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.510470 4949 reconciler_common.go:293] "Volume detached for volume \"pvc-c8fa8abd-35cd-473b-b870-e4bd2cd177cf\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c8fa8abd-35cd-473b-b870-e4bd2cd177cf\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.510480 4949 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cbf839a7-7c2a-4595-b736-601d5559d548-public-tls-certs\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.550483 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.550575 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.550644 4949 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-26lss"
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.551764 4949 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c897db476ea0eaab84f58dfc5ce1290f1b6a8a12d03297a1f99537a46ae19905"} pod="openshift-machine-config-operator/machine-config-daemon-26lss" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.551835 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" containerID="cri-o://c897db476ea0eaab84f58dfc5ce1290f1b6a8a12d03297a1f99537a46ae19905" gracePeriod=600
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.801678 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.826050 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"]
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.867613 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"]
Feb 16 11:30:34 crc kubenswrapper[4949]: E0216 11:30:34.868294 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cbf839a7-7c2a-4595-b736-601d5559d548" containerName="glance-httpd"
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.868318 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="cbf839a7-7c2a-4595-b736-601d5559d548" containerName="glance-httpd"
Feb 16 11:30:34 crc kubenswrapper[4949]: E0216 11:30:34.868365 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33f5ed17-eb47-40e9-a499-b1604532f38e" containerName="glance-httpd"
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.868375 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="33f5ed17-eb47-40e9-a499-b1604532f38e" containerName="glance-httpd"
Feb 16 11:30:34 crc kubenswrapper[4949]: E0216 11:30:34.868403 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33f5ed17-eb47-40e9-a499-b1604532f38e" containerName="glance-log"
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.868414 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="33f5ed17-eb47-40e9-a499-b1604532f38e" containerName="glance-log"
Feb 16 11:30:34 crc kubenswrapper[4949]: E0216 11:30:34.868438 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cbf839a7-7c2a-4595-b736-601d5559d548" containerName="glance-log"
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.868446 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="cbf839a7-7c2a-4595-b736-601d5559d548" containerName="glance-log"
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.868719 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="33f5ed17-eb47-40e9-a499-b1604532f38e" containerName="glance-log"
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.868744 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="cbf839a7-7c2a-4595-b736-601d5559d548" containerName="glance-httpd"
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.868770 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="cbf839a7-7c2a-4595-b736-601d5559d548" containerName="glance-log"
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.868788 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="33f5ed17-eb47-40e9-a499-b1604532f38e" containerName="glance-httpd"
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.870360 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.874697 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts"
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.874846 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc"
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.875148 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data"
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.875280 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-76nfs"
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.893236 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.910185 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"]
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.927682 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.942500 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"]
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.944842 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.947591 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc"
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.948336 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data"
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.948501 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-c8fa8abd-35cd-473b-b870-e4bd2cd177cf\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c8fa8abd-35cd-473b-b870-e4bd2cd177cf\") pod \"glance-default-external-api-0\" (UID: \"a5b65741-25f3-43db-a544-85997388cfea\") " pod="openstack/glance-default-external-api-0"
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.948699 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a5b65741-25f3-43db-a544-85997388cfea-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"a5b65741-25f3-43db-a544-85997388cfea\") " pod="openstack/glance-default-external-api-0"
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.949125 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ntpln\" (UniqueName: \"kubernetes.io/projected/a5b65741-25f3-43db-a544-85997388cfea-kube-api-access-ntpln\") pod \"glance-default-external-api-0\" (UID: \"a5b65741-25f3-43db-a544-85997388cfea\") " pod="openstack/glance-default-external-api-0"
Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.949187 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a5b65741-25f3-43db-a544-85997388cfea-logs\") pod \"glance-default-external-api-0\" (UID:
\"a5b65741-25f3-43db-a544-85997388cfea\") " pod="openstack/glance-default-external-api-0" Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.949270 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a5b65741-25f3-43db-a544-85997388cfea-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"a5b65741-25f3-43db-a544-85997388cfea\") " pod="openstack/glance-default-external-api-0" Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.949454 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5b65741-25f3-43db-a544-85997388cfea-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"a5b65741-25f3-43db-a544-85997388cfea\") " pod="openstack/glance-default-external-api-0" Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.949580 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5b65741-25f3-43db-a544-85997388cfea-config-data\") pod \"glance-default-external-api-0\" (UID: \"a5b65741-25f3-43db-a544-85997388cfea\") " pod="openstack/glance-default-external-api-0" Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.949653 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a5b65741-25f3-43db-a544-85997388cfea-scripts\") pod \"glance-default-external-api-0\" (UID: \"a5b65741-25f3-43db-a544-85997388cfea\") " pod="openstack/glance-default-external-api-0" Feb 16 11:30:34 crc kubenswrapper[4949]: I0216 11:30:34.978985 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 16 11:30:35 crc kubenswrapper[4949]: I0216 11:30:35.051911 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-c8fa8abd-35cd-473b-b870-e4bd2cd177cf\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c8fa8abd-35cd-473b-b870-e4bd2cd177cf\") pod \"glance-default-external-api-0\" (UID: \"a5b65741-25f3-43db-a544-85997388cfea\") " pod="openstack/glance-default-external-api-0" Feb 16 11:30:35 crc kubenswrapper[4949]: I0216 11:30:35.051986 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a5b65741-25f3-43db-a544-85997388cfea-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"a5b65741-25f3-43db-a544-85997388cfea\") " pod="openstack/glance-default-external-api-0" Feb 16 11:30:35 crc kubenswrapper[4949]: I0216 11:30:35.052010 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/495daabb-a227-4235-ace3-6caae6936da4-config-data\") pod \"glance-default-internal-api-0\" (UID: \"495daabb-a227-4235-ace3-6caae6936da4\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:30:35 crc kubenswrapper[4949]: I0216 11:30:35.052075 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/495daabb-a227-4235-ace3-6caae6936da4-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"495daabb-a227-4235-ace3-6caae6936da4\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:30:35 crc kubenswrapper[4949]: I0216 11:30:35.052112 
4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ntpln\" (UniqueName: \"kubernetes.io/projected/a5b65741-25f3-43db-a544-85997388cfea-kube-api-access-ntpln\") pod \"glance-default-external-api-0\" (UID: \"a5b65741-25f3-43db-a544-85997388cfea\") " pod="openstack/glance-default-external-api-0" Feb 16 11:30:35 crc kubenswrapper[4949]: I0216 11:30:35.052133 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a5b65741-25f3-43db-a544-85997388cfea-logs\") pod \"glance-default-external-api-0\" (UID: \"a5b65741-25f3-43db-a544-85997388cfea\") " pod="openstack/glance-default-external-api-0" Feb 16 11:30:35 crc kubenswrapper[4949]: I0216 11:30:35.052158 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a5b65741-25f3-43db-a544-85997388cfea-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"a5b65741-25f3-43db-a544-85997388cfea\") " pod="openstack/glance-default-external-api-0" Feb 16 11:30:35 crc kubenswrapper[4949]: I0216 11:30:35.052219 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5b65741-25f3-43db-a544-85997388cfea-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"a5b65741-25f3-43db-a544-85997388cfea\") " pod="openstack/glance-default-external-api-0" Feb 16 11:30:35 crc kubenswrapper[4949]: I0216 11:30:35.052266 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5b65741-25f3-43db-a544-85997388cfea-config-data\") pod \"glance-default-external-api-0\" (UID: \"a5b65741-25f3-43db-a544-85997388cfea\") " pod="openstack/glance-default-external-api-0" Feb 16 11:30:35 crc kubenswrapper[4949]: I0216 11:30:35.052288 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/495daabb-a227-4235-ace3-6caae6936da4-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"495daabb-a227-4235-ace3-6caae6936da4\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:30:35 crc kubenswrapper[4949]: I0216 11:30:35.052320 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a5b65741-25f3-43db-a544-85997388cfea-scripts\") pod \"glance-default-external-api-0\" (UID: \"a5b65741-25f3-43db-a544-85997388cfea\") " pod="openstack/glance-default-external-api-0" Feb 16 11:30:35 crc kubenswrapper[4949]: I0216 11:30:35.052340 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/495daabb-a227-4235-ace3-6caae6936da4-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"495daabb-a227-4235-ace3-6caae6936da4\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:30:35 crc kubenswrapper[4949]: I0216 11:30:35.052362 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-45dz5\" (UniqueName: \"kubernetes.io/projected/495daabb-a227-4235-ace3-6caae6936da4-kube-api-access-45dz5\") pod \"glance-default-internal-api-0\" (UID: \"495daabb-a227-4235-ace3-6caae6936da4\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:30:35 crc kubenswrapper[4949]: I0216 
11:30:35.052383 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/495daabb-a227-4235-ace3-6caae6936da4-logs\") pod \"glance-default-internal-api-0\" (UID: \"495daabb-a227-4235-ace3-6caae6936da4\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:30:35 crc kubenswrapper[4949]: I0216 11:30:35.052409 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-27105715-b37b-455e-aa04-f095a035218a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-27105715-b37b-455e-aa04-f095a035218a\") pod \"glance-default-internal-api-0\" (UID: \"495daabb-a227-4235-ace3-6caae6936da4\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:30:35 crc kubenswrapper[4949]: I0216 11:30:35.052428 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/495daabb-a227-4235-ace3-6caae6936da4-scripts\") pod \"glance-default-internal-api-0\" (UID: \"495daabb-a227-4235-ace3-6caae6936da4\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:30:35 crc kubenswrapper[4949]: I0216 11:30:35.053421 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a5b65741-25f3-43db-a544-85997388cfea-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"a5b65741-25f3-43db-a544-85997388cfea\") " pod="openstack/glance-default-external-api-0" Feb 16 11:30:35 crc kubenswrapper[4949]: I0216 11:30:35.053521 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a5b65741-25f3-43db-a544-85997388cfea-logs\") pod \"glance-default-external-api-0\" (UID: \"a5b65741-25f3-43db-a544-85997388cfea\") " pod="openstack/glance-default-external-api-0" Feb 16 11:30:35 crc kubenswrapper[4949]: I0216 11:30:35.054592 4949 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Feb 16 11:30:35 crc kubenswrapper[4949]: I0216 11:30:35.054625 4949 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-c8fa8abd-35cd-473b-b870-e4bd2cd177cf\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c8fa8abd-35cd-473b-b870-e4bd2cd177cf\") pod \"glance-default-external-api-0\" (UID: \"a5b65741-25f3-43db-a544-85997388cfea\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/459835082c7f1564eb79959c4b82a2b1818ffad6c5c0d3df291e5e1cf38dd0a7/globalmount\"" pod="openstack/glance-default-external-api-0"
Feb 16 11:30:35 crc kubenswrapper[4949]: I0216 11:30:35.056980 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a5b65741-25f3-43db-a544-85997388cfea-scripts\") pod \"glance-default-external-api-0\" (UID: \"a5b65741-25f3-43db-a544-85997388cfea\") " pod="openstack/glance-default-external-api-0"
Feb 16 11:30:35 crc kubenswrapper[4949]: I0216 11:30:35.059030 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5b65741-25f3-43db-a544-85997388cfea-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"a5b65741-25f3-43db-a544-85997388cfea\") " pod="openstack/glance-default-external-api-0"
Feb 16 11:30:35 crc kubenswrapper[4949]: I0216 11:30:35.060278 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5b65741-25f3-43db-a544-85997388cfea-config-data\") pod \"glance-default-external-api-0\" (UID: \"a5b65741-25f3-43db-a544-85997388cfea\") " pod="openstack/glance-default-external-api-0"
Feb 16 11:30:35 crc kubenswrapper[4949]: I0216 11:30:35.060941 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a5b65741-25f3-43db-a544-85997388cfea-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"a5b65741-25f3-43db-a544-85997388cfea\") " pod="openstack/glance-default-external-api-0"
Feb 16 11:30:35 crc kubenswrapper[4949]: I0216 11:30:35.088126 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ntpln\" (UniqueName: \"kubernetes.io/projected/a5b65741-25f3-43db-a544-85997388cfea-kube-api-access-ntpln\") pod \"glance-default-external-api-0\" (UID: \"a5b65741-25f3-43db-a544-85997388cfea\") " pod="openstack/glance-default-external-api-0"
Feb 16 11:30:35 crc kubenswrapper[4949]: I0216 11:30:35.100944 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-c8fa8abd-35cd-473b-b870-e4bd2cd177cf\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c8fa8abd-35cd-473b-b870-e4bd2cd177cf\") pod \"glance-default-external-api-0\" (UID: \"a5b65741-25f3-43db-a544-85997388cfea\") " pod="openstack/glance-default-external-api-0"
Feb 16 11:30:35 crc kubenswrapper[4949]: I0216 11:30:35.156487 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/495daabb-a227-4235-ace3-6caae6936da4-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"495daabb-a227-4235-ace3-6caae6936da4\") " pod="openstack/glance-default-internal-api-0"
Feb 16 11:30:35 crc kubenswrapper[4949]: I0216 11:30:35.157123 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/495daabb-a227-4235-ace3-6caae6936da4-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"495daabb-a227-4235-ace3-6caae6936da4\") " pod="openstack/glance-default-internal-api-0"
Feb 16 11:30:35 crc kubenswrapper[4949]: I0216 11:30:35.157201 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/495daabb-a227-4235-ace3-6caae6936da4-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"495daabb-a227-4235-ace3-6caae6936da4\") " pod="openstack/glance-default-internal-api-0"
Feb 16 11:30:35 crc kubenswrapper[4949]: I0216 11:30:35.157295 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/495daabb-a227-4235-ace3-6caae6936da4-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"495daabb-a227-4235-ace3-6caae6936da4\") " pod="openstack/glance-default-internal-api-0"
Feb 16 11:30:35 crc kubenswrapper[4949]: I0216 11:30:35.157349 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-45dz5\" (UniqueName: \"kubernetes.io/projected/495daabb-a227-4235-ace3-6caae6936da4-kube-api-access-45dz5\") pod \"glance-default-internal-api-0\" (UID: \"495daabb-a227-4235-ace3-6caae6936da4\") " pod="openstack/glance-default-internal-api-0"
Feb 16 11:30:35 crc kubenswrapper[4949]: I0216 11:30:35.157393 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/495daabb-a227-4235-ace3-6caae6936da4-logs\") pod \"glance-default-internal-api-0\" (UID: \"495daabb-a227-4235-ace3-6caae6936da4\") " pod="openstack/glance-default-internal-api-0"
Feb 16 11:30:35 crc kubenswrapper[4949]: I0216 11:30:35.157437 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-27105715-b37b-455e-aa04-f095a035218a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-27105715-b37b-455e-aa04-f095a035218a\") pod \"glance-default-internal-api-0\" (UID: \"495daabb-a227-4235-ace3-6caae6936da4\") " pod="openstack/glance-default-internal-api-0"
Feb 16 11:30:35 crc kubenswrapper[4949]: I0216 11:30:35.157461 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/495daabb-a227-4235-ace3-6caae6936da4-scripts\") pod \"glance-default-internal-api-0\" (UID: \"495daabb-a227-4235-ace3-6caae6936da4\") " pod="openstack/glance-default-internal-api-0"
Feb 16 11:30:35 crc kubenswrapper[4949]: I0216 11:30:35.157617 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/495daabb-a227-4235-ace3-6caae6936da4-config-data\") pod \"glance-default-internal-api-0\" (UID: \"495daabb-a227-4235-ace3-6caae6936da4\") " pod="openstack/glance-default-internal-api-0"
Feb 16 11:30:35 crc kubenswrapper[4949]: I0216 11:30:35.159482 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/495daabb-a227-4235-ace3-6caae6936da4-logs\") pod \"glance-default-internal-api-0\" (UID: \"495daabb-a227-4235-ace3-6caae6936da4\") " pod="openstack/glance-default-internal-api-0"
Feb 16 11:30:35 crc kubenswrapper[4949]: I0216 11:30:35.161949 4949 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Feb 16 11:30:35 crc kubenswrapper[4949]: I0216 11:30:35.161992 4949 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-27105715-b37b-455e-aa04-f095a035218a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-27105715-b37b-455e-aa04-f095a035218a\") pod \"glance-default-internal-api-0\" (UID: \"495daabb-a227-4235-ace3-6caae6936da4\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/eecfdadb463f5cbe330cd09c7447d0c2f21fd30b4fa967afbb7cd97dad6544d3/globalmount\"" pod="openstack/glance-default-internal-api-0"
Feb 16 11:30:35 crc kubenswrapper[4949]: I0216 11:30:35.165468 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/495daabb-a227-4235-ace3-6caae6936da4-config-data\") pod \"glance-default-internal-api-0\" (UID: \"495daabb-a227-4235-ace3-6caae6936da4\") " pod="openstack/glance-default-internal-api-0"
Feb 16 11:30:35 crc kubenswrapper[4949]: I0216 11:30:35.169447 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/495daabb-a227-4235-ace3-6caae6936da4-scripts\") pod \"glance-default-internal-api-0\" (UID: \"495daabb-a227-4235-ace3-6caae6936da4\") " pod="openstack/glance-default-internal-api-0"
Feb 16 11:30:35 crc kubenswrapper[4949]: I0216 11:30:35.179311 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/495daabb-a227-4235-ace3-6caae6936da4-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"495daabb-a227-4235-ace3-6caae6936da4\") " pod="openstack/glance-default-internal-api-0"
Feb 16 11:30:35 crc kubenswrapper[4949]: I0216 11:30:35.181141 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/495daabb-a227-4235-ace3-6caae6936da4-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"495daabb-a227-4235-ace3-6caae6936da4\") " pod="openstack/glance-default-internal-api-0"
Feb 16 11:30:35 crc kubenswrapper[4949]: I0216 11:30:35.184365 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-45dz5\" (UniqueName: \"kubernetes.io/projected/495daabb-a227-4235-ace3-6caae6936da4-kube-api-access-45dz5\") pod \"glance-default-internal-api-0\" (UID: \"495daabb-a227-4235-ace3-6caae6936da4\") " pod="openstack/glance-default-internal-api-0"
Feb 16 11:30:35 crc kubenswrapper[4949]: I0216 11:30:35.196989 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Feb 16 11:30:35 crc kubenswrapper[4949]: I0216 11:30:35.261049 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-27105715-b37b-455e-aa04-f095a035218a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-27105715-b37b-455e-aa04-f095a035218a\") pod \"glance-default-internal-api-0\" (UID: \"495daabb-a227-4235-ace3-6caae6936da4\") " pod="openstack/glance-default-internal-api-0"
Feb 16 11:30:35 crc kubenswrapper[4949]: I0216 11:30:35.267968 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="33f5ed17-eb47-40e9-a499-b1604532f38e" path="/var/lib/kubelet/pods/33f5ed17-eb47-40e9-a499-b1604532f38e/volumes"
Feb 16 11:30:35 crc kubenswrapper[4949]: I0216 11:30:35.270762 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cbf839a7-7c2a-4595-b736-601d5559d548" path="/var/lib/kubelet/pods/cbf839a7-7c2a-4595-b736-601d5559d548/volumes"
Feb 16 11:30:35 crc kubenswrapper[4949]: I0216 11:30:35.275300 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Feb 16 11:30:35 crc kubenswrapper[4949]: I0216 11:30:35.450980 4949 generic.go:334] "Generic (PLEG): container finished" podID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerID="c897db476ea0eaab84f58dfc5ce1290f1b6a8a12d03297a1f99537a46ae19905" exitCode=0
Feb 16 11:30:35 crc kubenswrapper[4949]: I0216 11:30:35.451040 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" event={"ID":"39ca5ab7-457c-4404-a3eb-f6acce74843b","Type":"ContainerDied","Data":"c897db476ea0eaab84f58dfc5ce1290f1b6a8a12d03297a1f99537a46ae19905"}
Feb 16 11:30:37 crc kubenswrapper[4949]: E0216 11:30:37.090798 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified"
Feb 16 11:30:37 crc kubenswrapper[4949]: E0216 11:30:37.091144 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,Command:[/bin/bash],Args:[-c barbican-manage db upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-x4b99,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-wbd7n_openstack(f40c7714-8669-4c33-8b1d-e3be853ca911): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Feb 16 11:30:37 crc kubenswrapper[4949]: E0216 11:30:37.092311 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-wbd7n" podUID="f40c7714-8669-4c33-8b1d-e3be853ca911"
Feb 16 11:30:37 crc kubenswrapper[4949]: E0216 11:30:37.476703 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified\\\"\"" pod="openstack/barbican-db-sync-wbd7n" podUID="f40c7714-8669-4c33-8b1d-e3be853ca911"
Feb 16 11:30:39 crc kubenswrapper[4949]: I0216 11:30:39.495644 4949 generic.go:334] "Generic (PLEG): container finished" podID="527d49b7-fb82-4e46-b608-839a2fce0f60" containerID="69b1b8aa41d1642ee1aceb6f196fc1457ca4b10b446dd6f7d8b3ba14678525d7" exitCode=0
Feb 16 11:30:39 crc kubenswrapper[4949]: I0216 11:30:39.495739 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-t8dz8" event={"ID":"527d49b7-fb82-4e46-b608-839a2fce0f60","Type":"ContainerDied","Data":"69b1b8aa41d1642ee1aceb6f196fc1457ca4b10b446dd6f7d8b3ba14678525d7"}
Feb 16 11:30:40 crc kubenswrapper[4949]: I0216 11:30:40.047153 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-7ff5475cc9-wl8jh" podUID="6c60c38e-2850-474f-85f8-2bd436299ebe" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.167:5353: i/o timeout"
Feb 16 11:30:45 crc kubenswrapper[4949]: E0216 11:30:45.008590 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified"
Feb 16 11:30:45 crc kubenswrapper[4949]: E0216 11:30:45.009218 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68dh5dfh5fhfch647h5b7h66bh6bh568h599hf5h565h5dfh67ch85h5b7h59fhd7h659hf5h58dh698h544h57fh5ddh55bhb7h56h574h76h58ch676q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-2v464,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(c59b957e-c5f8-463f-8228-1051225f5140): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Feb 16 11:30:45 crc kubenswrapper[4949]: I0216 11:30:45.047858 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-7ff5475cc9-wl8jh" podUID="6c60c38e-2850-474f-85f8-2bd436299ebe" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.167:5353: i/o timeout"
Feb 16 11:30:45 crc kubenswrapper[4949]: I0216 11:30:45.048871 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7ff5475cc9-wl8jh"
Feb 16 11:30:45 crc kubenswrapper[4949]: I0216 11:30:45.183995 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7ff5475cc9-wl8jh"
Feb 16 11:30:45 crc kubenswrapper[4949]: I0216 11:30:45.189381 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-t8dz8"
Feb 16 11:30:45 crc kubenswrapper[4949]: I0216 11:30:45.221091 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6c60c38e-2850-474f-85f8-2bd436299ebe-dns-svc\") pod \"6c60c38e-2850-474f-85f8-2bd436299ebe\" (UID: \"6c60c38e-2850-474f-85f8-2bd436299ebe\") "
Feb 16 11:30:45 crc kubenswrapper[4949]: I0216 11:30:45.221180 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qh2m8\" (UniqueName: \"kubernetes.io/projected/6c60c38e-2850-474f-85f8-2bd436299ebe-kube-api-access-qh2m8\") pod \"6c60c38e-2850-474f-85f8-2bd436299ebe\" (UID: \"6c60c38e-2850-474f-85f8-2bd436299ebe\") "
Feb 16 11:30:45 crc kubenswrapper[4949]: I0216 11:30:45.221330 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c60c38e-2850-474f-85f8-2bd436299ebe-config\") pod \"6c60c38e-2850-474f-85f8-2bd436299ebe\" (UID: \"6c60c38e-2850-474f-85f8-2bd436299ebe\") "
Feb 16 11:30:45 crc kubenswrapper[4949]: I0216 11:30:45.221487 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6c60c38e-2850-474f-85f8-2bd436299ebe-ovsdbserver-nb\") pod \"6c60c38e-2850-474f-85f8-2bd436299ebe\" (UID: \"6c60c38e-2850-474f-85f8-2bd436299ebe\") "
Feb 16 11:30:45 crc kubenswrapper[4949]: I0216 11:30:45.221546 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6c60c38e-2850-474f-85f8-2bd436299ebe-ovsdbserver-sb\") pod \"6c60c38e-2850-474f-85f8-2bd436299ebe\" (UID: \"6c60c38e-2850-474f-85f8-2bd436299ebe\") "
Feb 16 11:30:45 crc kubenswrapper[4949]: I0216 11:30:45.221568 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6c60c38e-2850-474f-85f8-2bd436299ebe-dns-swift-storage-0\") pod \"6c60c38e-2850-474f-85f8-2bd436299ebe\" (UID: \"6c60c38e-2850-474f-85f8-2bd436299ebe\") "
Feb 16 11:30:45 crc kubenswrapper[4949]: I0216 11:30:45.227902 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c60c38e-2850-474f-85f8-2bd436299ebe-kube-api-access-qh2m8" (OuterVolumeSpecName: "kube-api-access-qh2m8") pod "6c60c38e-2850-474f-85f8-2bd436299ebe" (UID: "6c60c38e-2850-474f-85f8-2bd436299ebe"). InnerVolumeSpecName "kube-api-access-qh2m8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Feb 16 11:30:45 crc kubenswrapper[4949]: I0216 11:30:45.297095 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6c60c38e-2850-474f-85f8-2bd436299ebe-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "6c60c38e-2850-474f-85f8-2bd436299ebe" (UID: "6c60c38e-2850-474f-85f8-2bd436299ebe"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Feb 16 11:30:45 crc kubenswrapper[4949]: I0216 11:30:45.315422 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6c60c38e-2850-474f-85f8-2bd436299ebe-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "6c60c38e-2850-474f-85f8-2bd436299ebe" (UID: "6c60c38e-2850-474f-85f8-2bd436299ebe"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Feb 16 11:30:45 crc kubenswrapper[4949]: I0216 11:30:45.317902 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6c60c38e-2850-474f-85f8-2bd436299ebe-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "6c60c38e-2850-474f-85f8-2bd436299ebe" (UID: "6c60c38e-2850-474f-85f8-2bd436299ebe"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Feb 16 11:30:45 crc kubenswrapper[4949]: I0216 11:30:45.324055 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zmjgv\" (UniqueName: \"kubernetes.io/projected/527d49b7-fb82-4e46-b608-839a2fce0f60-kube-api-access-zmjgv\") pod \"527d49b7-fb82-4e46-b608-839a2fce0f60\" (UID: \"527d49b7-fb82-4e46-b608-839a2fce0f60\") "
Feb 16 11:30:45 crc kubenswrapper[4949]: I0216 11:30:45.324616 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/527d49b7-fb82-4e46-b608-839a2fce0f60-combined-ca-bundle\") pod \"527d49b7-fb82-4e46-b608-839a2fce0f60\" (UID: \"527d49b7-fb82-4e46-b608-839a2fce0f60\") "
Feb 16 11:30:45 crc kubenswrapper[4949]: I0216 11:30:45.324834 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/527d49b7-fb82-4e46-b608-839a2fce0f60-config\") pod \"527d49b7-fb82-4e46-b608-839a2fce0f60\" (UID: \"527d49b7-fb82-4e46-b608-839a2fce0f60\") "
Feb 16 11:30:45 crc kubenswrapper[4949]: I0216 11:30:45.326238 4949 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6c60c38e-2850-474f-85f8-2bd436299ebe-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:45 crc kubenswrapper[4949]: I0216 11:30:45.326269 4949 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6c60c38e-2850-474f-85f8-2bd436299ebe-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:45 crc kubenswrapper[4949]: I0216 11:30:45.326288 4949 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6c60c38e-2850-474f-85f8-2bd436299ebe-dns-svc\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:45 crc kubenswrapper[4949]: I0216 11:30:45.326301 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qh2m8\" (UniqueName: \"kubernetes.io/projected/6c60c38e-2850-474f-85f8-2bd436299ebe-kube-api-access-qh2m8\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:45 crc kubenswrapper[4949]: I0216 11:30:45.326433 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6c60c38e-2850-474f-85f8-2bd436299ebe-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "6c60c38e-2850-474f-85f8-2bd436299ebe" (UID: "6c60c38e-2850-474f-85f8-2bd436299ebe"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Feb 16 11:30:45 crc kubenswrapper[4949]: I0216 11:30:45.327059 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/527d49b7-fb82-4e46-b608-839a2fce0f60-kube-api-access-zmjgv" (OuterVolumeSpecName: "kube-api-access-zmjgv") pod "527d49b7-fb82-4e46-b608-839a2fce0f60" (UID: "527d49b7-fb82-4e46-b608-839a2fce0f60"). InnerVolumeSpecName "kube-api-access-zmjgv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Feb 16 11:30:45 crc kubenswrapper[4949]: I0216 11:30:45.350894 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6c60c38e-2850-474f-85f8-2bd436299ebe-config" (OuterVolumeSpecName: "config") pod "6c60c38e-2850-474f-85f8-2bd436299ebe" (UID: "6c60c38e-2850-474f-85f8-2bd436299ebe"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Feb 16 11:30:45 crc kubenswrapper[4949]: I0216 11:30:45.356849 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/527d49b7-fb82-4e46-b608-839a2fce0f60-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "527d49b7-fb82-4e46-b608-839a2fce0f60" (UID: "527d49b7-fb82-4e46-b608-839a2fce0f60"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Feb 16 11:30:45 crc kubenswrapper[4949]: I0216 11:30:45.357381 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/527d49b7-fb82-4e46-b608-839a2fce0f60-config" (OuterVolumeSpecName: "config") pod "527d49b7-fb82-4e46-b608-839a2fce0f60" (UID: "527d49b7-fb82-4e46-b608-839a2fce0f60"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Feb 16 11:30:45 crc kubenswrapper[4949]: I0216 11:30:45.427989 4949 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6c60c38e-2850-474f-85f8-2bd436299ebe-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:45 crc kubenswrapper[4949]: I0216 11:30:45.428022 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zmjgv\" (UniqueName: \"kubernetes.io/projected/527d49b7-fb82-4e46-b608-839a2fce0f60-kube-api-access-zmjgv\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:45 crc kubenswrapper[4949]: I0216 11:30:45.428038 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/527d49b7-fb82-4e46-b608-839a2fce0f60-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:45 crc kubenswrapper[4949]: I0216 11:30:45.428050 4949 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c60c38e-2850-474f-85f8-2bd436299ebe-config\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:45 crc kubenswrapper[4949]: I0216 11:30:45.428061 4949 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/527d49b7-fb82-4e46-b608-839a2fce0f60-config\") on node \"crc\" DevicePath \"\""
Feb 16 11:30:45 crc kubenswrapper[4949]: I0216 11:30:45.577538 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7ff5475cc9-wl8jh" event={"ID":"6c60c38e-2850-474f-85f8-2bd436299ebe","Type":"ContainerDied","Data":"c74486cd9df4468784f4ea0b598e579b076428dce6441b01adc6e6c064834e2d"}
Feb 16 11:30:45 crc kubenswrapper[4949]: I0216 11:30:45.577658 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7ff5475cc9-wl8jh"
Feb 16 11:30:45 crc kubenswrapper[4949]: I0216 11:30:45.580715 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-t8dz8" event={"ID":"527d49b7-fb82-4e46-b608-839a2fce0f60","Type":"ContainerDied","Data":"f467af5effe1ad02b5b1ae6d7729b27bea25232505ab2beecac76bf4882aadd7"}
Feb 16 11:30:45 crc kubenswrapper[4949]: I0216 11:30:45.580774 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f467af5effe1ad02b5b1ae6d7729b27bea25232505ab2beecac76bf4882aadd7"
Feb 16 11:30:45 crc kubenswrapper[4949]: I0216 11:30:45.580857 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-t8dz8"
Feb 16 11:30:45 crc kubenswrapper[4949]: I0216 11:30:45.632302 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7ff5475cc9-wl8jh"]
Feb 16 11:30:45 crc kubenswrapper[4949]: I0216 11:30:45.646906 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7ff5475cc9-wl8jh"]
Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.377324 4949 scope.go:117] "RemoveContainer" containerID="cd91806a364c21d1d224bb7a95f370a062b1f7d79513ffd02bfdd697b2db567e"
Feb 16 11:30:46 crc kubenswrapper[4949]: E0216 11:30:46.483087 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified"
Feb 16 11:30:46 crc kubenswrapper[4949]: E0216 11:30:46.483478 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-vsjfm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-vh5z2_openstack(4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Feb 16 11:30:46 crc kubenswrapper[4949]: E0216 11:30:46.485694 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-vh5z2" podUID="4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f"
Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.542101 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-84b966f6c9-blz5m"]
Feb 16 11:30:46 crc kubenswrapper[4949]: E0216 11:30:46.542660 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="527d49b7-fb82-4e46-b608-839a2fce0f60" containerName="neutron-db-sync"
Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.542673 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="527d49b7-fb82-4e46-b608-839a2fce0f60" containerName="neutron-db-sync"
Feb 16 11:30:46 crc kubenswrapper[4949]: E0216 11:30:46.542695 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c60c38e-2850-474f-85f8-2bd436299ebe" containerName="dnsmasq-dns"
Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.542700 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c60c38e-2850-474f-85f8-2bd436299ebe" containerName="dnsmasq-dns"
Feb 16 11:30:46 crc kubenswrapper[4949]: E0216 11:30:46.542729 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c60c38e-2850-474f-85f8-2bd436299ebe" containerName="init"
Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.542736 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c60c38e-2850-474f-85f8-2bd436299ebe" containerName="init"
Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.542963 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c60c38e-2850-474f-85f8-2bd436299ebe" containerName="dnsmasq-dns"
Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.542979 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="527d49b7-fb82-4e46-b608-839a2fce0f60" containerName="neutron-db-sync"
Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.545993 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84b966f6c9-blz5m"
Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.562094 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-84b966f6c9-blz5m"]
Feb 16 11:30:46 crc kubenswrapper[4949]: E0216 11:30:46.616508 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-vh5z2" podUID="4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f"
Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.662555 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/24bc9e69-eda6-45c5-b43a-0dbe44f2d12d-dns-svc\") pod \"dnsmasq-dns-84b966f6c9-blz5m\" (UID: \"24bc9e69-eda6-45c5-b43a-0dbe44f2d12d\") " pod="openstack/dnsmasq-dns-84b966f6c9-blz5m"
Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.662880 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/24bc9e69-eda6-45c5-b43a-0dbe44f2d12d-config\") pod \"dnsmasq-dns-84b966f6c9-blz5m\" (UID: \"24bc9e69-eda6-45c5-b43a-0dbe44f2d12d\") " pod="openstack/dnsmasq-dns-84b966f6c9-blz5m"
Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.662930 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/24bc9e69-eda6-45c5-b43a-0dbe44f2d12d-dns-swift-storage-0\") pod \"dnsmasq-dns-84b966f6c9-blz5m\" (UID: \"24bc9e69-eda6-45c5-b43a-0dbe44f2d12d\") " pod="openstack/dnsmasq-dns-84b966f6c9-blz5m"
Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.663001 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g8x7c\" (UniqueName: \"kubernetes.io/projected/24bc9e69-eda6-45c5-b43a-0dbe44f2d12d-kube-api-access-g8x7c\") pod \"dnsmasq-dns-84b966f6c9-blz5m\" (UID: \"24bc9e69-eda6-45c5-b43a-0dbe44f2d12d\") " pod="openstack/dnsmasq-dns-84b966f6c9-blz5m"
Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.663206 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/24bc9e69-eda6-45c5-b43a-0dbe44f2d12d-ovsdbserver-sb\") pod \"dnsmasq-dns-84b966f6c9-blz5m\" (UID: \"24bc9e69-eda6-45c5-b43a-0dbe44f2d12d\") " pod="openstack/dnsmasq-dns-84b966f6c9-blz5m"
Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.663278 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/24bc9e69-eda6-45c5-b43a-0dbe44f2d12d-ovsdbserver-nb\") pod \"dnsmasq-dns-84b966f6c9-blz5m\" (UID: \"24bc9e69-eda6-45c5-b43a-0dbe44f2d12d\") " pod="openstack/dnsmasq-dns-84b966f6c9-blz5m"
Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.666343 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-7f6ff59f84-j4tfn"]
Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.669362 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7f6ff59f84-j4tfn"
Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.671958 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-r87xd"
Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.672405 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config"
Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.673056 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config"
Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.675462 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs"
Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.684251 4949 scope.go:117] "RemoveContainer" containerID="096a3feef6aa8c035ad8397f106a01d25e81301f345fa43c7141180d39844ad5"
Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.696939 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7f6ff59f84-j4tfn"]
Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.767009 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/24bc9e69-eda6-45c5-b43a-0dbe44f2d12d-dns-svc\") pod \"dnsmasq-dns-84b966f6c9-blz5m\" (UID: \"24bc9e69-eda6-45c5-b43a-0dbe44f2d12d\") " pod="openstack/dnsmasq-dns-84b966f6c9-blz5m"
Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.767057 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/4f44c7bf-e0a8-4250-b513-147e6e88718d-config\") pod \"neutron-7f6ff59f84-j4tfn\" (UID: \"4f44c7bf-e0a8-4250-b513-147e6e88718d\") " pod="openstack/neutron-7f6ff59f84-j4tfn"
Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.767104 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/4f44c7bf-e0a8-4250-b513-147e6e88718d-httpd-config\") pod \"neutron-7f6ff59f84-j4tfn\" (UID: \"4f44c7bf-e0a8-4250-b513-147e6e88718d\") " pod="openstack/neutron-7f6ff59f84-j4tfn"
Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.767211 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gjv9q\" (UniqueName: \"kubernetes.io/projected/4f44c7bf-e0a8-4250-b513-147e6e88718d-kube-api-access-gjv9q\") pod \"neutron-7f6ff59f84-j4tfn\" (UID: \"4f44c7bf-e0a8-4250-b513-147e6e88718d\") " pod="openstack/neutron-7f6ff59f84-j4tfn"
Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.767246 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/24bc9e69-eda6-45c5-b43a-0dbe44f2d12d-config\") pod \"dnsmasq-dns-84b966f6c9-blz5m\" (UID: \"24bc9e69-eda6-45c5-b43a-0dbe44f2d12d\") " pod="openstack/dnsmasq-dns-84b966f6c9-blz5m"
Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.767275 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/24bc9e69-eda6-45c5-b43a-0dbe44f2d12d-dns-swift-storage-0\") pod \"dnsmasq-dns-84b966f6c9-blz5m\" (UID: \"24bc9e69-eda6-45c5-b43a-0dbe44f2d12d\") " pod="openstack/dnsmasq-dns-84b966f6c9-blz5m"
Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.767312 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g8x7c\" (UniqueName: \"kubernetes.io/projected/24bc9e69-eda6-45c5-b43a-0dbe44f2d12d-kube-api-access-g8x7c\") pod \"dnsmasq-dns-84b966f6c9-blz5m\" (UID: \"24bc9e69-eda6-45c5-b43a-0dbe44f2d12d\") " pod="openstack/dnsmasq-dns-84b966f6c9-blz5m"
Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.767345 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f44c7bf-e0a8-4250-b513-147e6e88718d-combined-ca-bundle\") pod \"neutron-7f6ff59f84-j4tfn\" (UID: \"4f44c7bf-e0a8-4250-b513-147e6e88718d\") " pod="openstack/neutron-7f6ff59f84-j4tfn"
Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.767417 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/24bc9e69-eda6-45c5-b43a-0dbe44f2d12d-ovsdbserver-sb\") pod \"dnsmasq-dns-84b966f6c9-blz5m\" (UID: \"24bc9e69-eda6-45c5-b43a-0dbe44f2d12d\") " pod="openstack/dnsmasq-dns-84b966f6c9-blz5m"
Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.767460 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/24bc9e69-eda6-45c5-b43a-0dbe44f2d12d-ovsdbserver-nb\") pod \"dnsmasq-dns-84b966f6c9-blz5m\" (UID: \"24bc9e69-eda6-45c5-b43a-0dbe44f2d12d\") " pod="openstack/dnsmasq-dns-84b966f6c9-blz5m"
Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.767489 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4f44c7bf-e0a8-4250-b513-147e6e88718d-ovndb-tls-certs\") pod \"neutron-7f6ff59f84-j4tfn\" (UID: \"4f44c7bf-e0a8-4250-b513-147e6e88718d\") " pod="openstack/neutron-7f6ff59f84-j4tfn"
Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.768301 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/24bc9e69-eda6-45c5-b43a-0dbe44f2d12d-dns-svc\") pod \"dnsmasq-dns-84b966f6c9-blz5m\" (UID: \"24bc9e69-eda6-45c5-b43a-0dbe44f2d12d\") " pod="openstack/dnsmasq-dns-84b966f6c9-blz5m"
Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.771740 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/24bc9e69-eda6-45c5-b43a-0dbe44f2d12d-config\") pod \"dnsmasq-dns-84b966f6c9-blz5m\" (UID: \"24bc9e69-eda6-45c5-b43a-0dbe44f2d12d\") " pod="openstack/dnsmasq-dns-84b966f6c9-blz5m"
Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.772359 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/24bc9e69-eda6-45c5-b43a-0dbe44f2d12d-ovsdbserver-nb\") pod \"dnsmasq-dns-84b966f6c9-blz5m\" (UID: \"24bc9e69-eda6-45c5-b43a-0dbe44f2d12d\") " pod="openstack/dnsmasq-dns-84b966f6c9-blz5m"
Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.778481 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/24bc9e69-eda6-45c5-b43a-0dbe44f2d12d-dns-swift-storage-0\") pod \"dnsmasq-dns-84b966f6c9-blz5m\" (UID: \"24bc9e69-eda6-45c5-b43a-0dbe44f2d12d\") " pod="openstack/dnsmasq-dns-84b966f6c9-blz5m"
Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.789077 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/24bc9e69-eda6-45c5-b43a-0dbe44f2d12d-ovsdbserver-sb\") pod \"dnsmasq-dns-84b966f6c9-blz5m\" (UID: \"24bc9e69-eda6-45c5-b43a-0dbe44f2d12d\") " pod="openstack/dnsmasq-dns-84b966f6c9-blz5m"
Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.797998 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g8x7c\" (UniqueName: \"kubernetes.io/projected/24bc9e69-eda6-45c5-b43a-0dbe44f2d12d-kube-api-access-g8x7c\") pod \"dnsmasq-dns-84b966f6c9-blz5m\" (UID: \"24bc9e69-eda6-45c5-b43a-0dbe44f2d12d\") " pod="openstack/dnsmasq-dns-84b966f6c9-blz5m"
Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.811229 4949 scope.go:117] "RemoveContainer" containerID="a861c7291314c3ccce2249281f01ad0fa0fb456adfd8af36e9e1270d33ebe980"
Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.869589 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4f44c7bf-e0a8-4250-b513-147e6e88718d-ovndb-tls-certs\") pod \"neutron-7f6ff59f84-j4tfn\" (UID: \"4f44c7bf-e0a8-4250-b513-147e6e88718d\") " pod="openstack/neutron-7f6ff59f84-j4tfn"
Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.869673 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/4f44c7bf-e0a8-4250-b513-147e6e88718d-config\") pod \"neutron-7f6ff59f84-j4tfn\" (UID: \"4f44c7bf-e0a8-4250-b513-147e6e88718d\") " pod="openstack/neutron-7f6ff59f84-j4tfn"
Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.870302 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/4f44c7bf-e0a8-4250-b513-147e6e88718d-httpd-config\") pod \"neutron-7f6ff59f84-j4tfn\" (UID: \"4f44c7bf-e0a8-4250-b513-147e6e88718d\") " pod="openstack/neutron-7f6ff59f84-j4tfn"
Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.870404 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gjv9q\" (UniqueName: \"kubernetes.io/projected/4f44c7bf-e0a8-4250-b513-147e6e88718d-kube-api-access-gjv9q\") pod \"neutron-7f6ff59f84-j4tfn\" (UID: \"4f44c7bf-e0a8-4250-b513-147e6e88718d\") " pod="openstack/neutron-7f6ff59f84-j4tfn"
Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.870451 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f44c7bf-e0a8-4250-b513-147e6e88718d-combined-ca-bundle\") pod \"neutron-7f6ff59f84-j4tfn\" (UID: \"4f44c7bf-e0a8-4250-b513-147e6e88718d\") "
pod="openstack/neutron-7f6ff59f84-j4tfn" Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.878290 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/4f44c7bf-e0a8-4250-b513-147e6e88718d-config\") pod \"neutron-7f6ff59f84-j4tfn\" (UID: \"4f44c7bf-e0a8-4250-b513-147e6e88718d\") " pod="openstack/neutron-7f6ff59f84-j4tfn" Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.887035 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f44c7bf-e0a8-4250-b513-147e6e88718d-combined-ca-bundle\") pod \"neutron-7f6ff59f84-j4tfn\" (UID: \"4f44c7bf-e0a8-4250-b513-147e6e88718d\") " pod="openstack/neutron-7f6ff59f84-j4tfn" Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.888059 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4f44c7bf-e0a8-4250-b513-147e6e88718d-ovndb-tls-certs\") pod \"neutron-7f6ff59f84-j4tfn\" (UID: \"4f44c7bf-e0a8-4250-b513-147e6e88718d\") " pod="openstack/neutron-7f6ff59f84-j4tfn" Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.892248 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/4f44c7bf-e0a8-4250-b513-147e6e88718d-httpd-config\") pod \"neutron-7f6ff59f84-j4tfn\" (UID: \"4f44c7bf-e0a8-4250-b513-147e6e88718d\") " pod="openstack/neutron-7f6ff59f84-j4tfn" Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.895267 4949 scope.go:117] "RemoveContainer" containerID="81d321ae3cf8ae54ff452597f9576b0c0a3cd11dc176e9a1b231f17a43bc97b9" Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.902527 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gjv9q\" (UniqueName: \"kubernetes.io/projected/4f44c7bf-e0a8-4250-b513-147e6e88718d-kube-api-access-gjv9q\") pod \"neutron-7f6ff59f84-j4tfn\" (UID: \"4f44c7bf-e0a8-4250-b513-147e6e88718d\") " pod="openstack/neutron-7f6ff59f84-j4tfn" Feb 16 11:30:46 crc kubenswrapper[4949]: I0216 11:30:46.953671 4949 scope.go:117] "RemoveContainer" containerID="c7fc92c9e2443a29efc50b9daa8d763effbd9224e5e5382c017f887381e6a1e2" Feb 16 11:30:47 crc kubenswrapper[4949]: I0216 11:30:47.017789 4949 scope.go:117] "RemoveContainer" containerID="e468ea0ac8a33755ff6f7b898d6e89c147068482f948dcf3da738d409ba619d8" Feb 16 11:30:47 crc kubenswrapper[4949]: I0216 11:30:47.018364 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84b966f6c9-blz5m" Feb 16 11:30:47 crc kubenswrapper[4949]: I0216 11:30:47.067790 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-7f6ff59f84-j4tfn" Feb 16 11:30:47 crc kubenswrapper[4949]: I0216 11:30:47.134203 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-g76dr"] Feb 16 11:30:47 crc kubenswrapper[4949]: W0216 11:30:47.198147 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podacc263a1_4f57_4dca_bcc5_5d5388539a5d.slice/crio-29ce604a72cd129b181271139de9b71a0233562f5fab2e134b087d9c42b19b10 WatchSource:0}: Error finding container 29ce604a72cd129b181271139de9b71a0233562f5fab2e134b087d9c42b19b10: Status 404 returned error can't find the container with id 29ce604a72cd129b181271139de9b71a0233562f5fab2e134b087d9c42b19b10 Feb 16 11:30:47 crc kubenswrapper[4949]: I0216 11:30:47.256147 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c60c38e-2850-474f-85f8-2bd436299ebe" path="/var/lib/kubelet/pods/6c60c38e-2850-474f-85f8-2bd436299ebe/volumes" Feb 16 11:30:47 crc kubenswrapper[4949]: I0216 11:30:47.381387 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-p69nz"] Feb 16 11:30:47 crc kubenswrapper[4949]: I0216 11:30:47.436618 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 16 11:30:47 crc kubenswrapper[4949]: W0216 11:30:47.451850 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda5b65741_25f3_43db_a544_85997388cfea.slice/crio-a295d66316bcdebc2d40746024ffd37d2ca845aecac50535544d493ac8bb845c WatchSource:0}: Error finding container a295d66316bcdebc2d40746024ffd37d2ca845aecac50535544d493ac8bb845c: Status 404 returned error can't find the container with id a295d66316bcdebc2d40746024ffd37d2ca845aecac50535544d493ac8bb845c Feb 16 11:30:47 crc kubenswrapper[4949]: I0216 11:30:47.667949 4949 generic.go:334] "Generic (PLEG): container finished" podID="acc263a1-4f57-4dca-bcc5-5d5388539a5d" containerID="551239ae0fd1826a8ae33dd25021464daf5cdbd897f04cebea1430951aa0a85d" exitCode=0 Feb 16 11:30:47 crc kubenswrapper[4949]: I0216 11:30:47.668299 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g76dr" event={"ID":"acc263a1-4f57-4dca-bcc5-5d5388539a5d","Type":"ContainerDied","Data":"551239ae0fd1826a8ae33dd25021464daf5cdbd897f04cebea1430951aa0a85d"} Feb 16 11:30:47 crc kubenswrapper[4949]: I0216 11:30:47.668339 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g76dr" event={"ID":"acc263a1-4f57-4dca-bcc5-5d5388539a5d","Type":"ContainerStarted","Data":"29ce604a72cd129b181271139de9b71a0233562f5fab2e134b087d9c42b19b10"} Feb 16 11:30:47 crc kubenswrapper[4949]: I0216 11:30:47.682823 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" event={"ID":"39ca5ab7-457c-4404-a3eb-f6acce74843b","Type":"ContainerStarted","Data":"fcc25fdabb7245fb2b21fe157daaa0479c92c3c1e231adb091c93ee7fe6b8437"} Feb 16 11:30:47 crc kubenswrapper[4949]: I0216 11:30:47.716722 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-ztzjs" event={"ID":"667080fb-b428-4b48-87c9-a955ff09771a","Type":"ContainerStarted","Data":"0d8e0dfc7ed2a7a584158a61c7a84d8168b4e7cce323bf8d92f2ace421633ffc"} Feb 16 11:30:47 crc kubenswrapper[4949]: I0216 11:30:47.735859 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/glance-default-external-api-0" event={"ID":"a5b65741-25f3-43db-a544-85997388cfea","Type":"ContainerStarted","Data":"a295d66316bcdebc2d40746024ffd37d2ca845aecac50535544d493ac8bb845c"} Feb 16 11:30:47 crc kubenswrapper[4949]: I0216 11:30:47.746910 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-p69nz" event={"ID":"89c4baaa-f449-4fab-a513-1eec4a163af9","Type":"ContainerStarted","Data":"7b437b6abbb245057e3bc21b728c6ec9ad2a89f2aeade4c274af45b1d0535f97"} Feb 16 11:30:47 crc kubenswrapper[4949]: I0216 11:30:47.748332 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-n7hw7" event={"ID":"18a2cf62-0669-4033-ba7f-c69805fa9c3a","Type":"ContainerStarted","Data":"8777fd92ca59578d672bca13a68eaf951c8a73f222708fec25ee0073f31a7b75"} Feb 16 11:30:47 crc kubenswrapper[4949]: I0216 11:30:47.796095 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-db-sync-ztzjs" podStartSLOduration=3.582240339 podStartE2EDuration="30.796068802s" podCreationTimestamp="2026-02-16 11:30:17 +0000 UTC" firstStartedPulling="2026-02-16 11:30:19.131518803 +0000 UTC m=+1408.760852968" lastFinishedPulling="2026-02-16 11:30:46.345347266 +0000 UTC m=+1435.974681431" observedRunningTime="2026-02-16 11:30:47.747385463 +0000 UTC m=+1437.376719628" watchObservedRunningTime="2026-02-16 11:30:47.796068802 +0000 UTC m=+1437.425402967" Feb 16 11:30:47 crc kubenswrapper[4949]: I0216 11:30:47.818125 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 16 11:30:47 crc kubenswrapper[4949]: I0216 11:30:47.850136 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-n7hw7" podStartSLOduration=5.552555326 podStartE2EDuration="30.850103704s" podCreationTimestamp="2026-02-16 11:30:17 +0000 UTC" firstStartedPulling="2026-02-16 11:30:19.727231616 +0000 UTC m=+1409.356565781" lastFinishedPulling="2026-02-16 11:30:45.024779994 +0000 UTC m=+1434.654114159" observedRunningTime="2026-02-16 11:30:47.770924604 +0000 UTC m=+1437.400258769" watchObservedRunningTime="2026-02-16 11:30:47.850103704 +0000 UTC m=+1437.479437879" Feb 16 11:30:48 crc kubenswrapper[4949]: I0216 11:30:48.127225 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-84b966f6c9-blz5m"] Feb 16 11:30:48 crc kubenswrapper[4949]: I0216 11:30:48.310260 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7f6ff59f84-j4tfn"] Feb 16 11:30:48 crc kubenswrapper[4949]: I0216 11:30:48.762737 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a5b65741-25f3-43db-a544-85997388cfea","Type":"ContainerStarted","Data":"6e14183673e66ee7f99da970e9998cd1900b24e724e8678ee11c26b965919ae4"} Feb 16 11:30:48 crc kubenswrapper[4949]: I0216 11:30:48.765875 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-p69nz" event={"ID":"89c4baaa-f449-4fab-a513-1eec4a163af9","Type":"ContainerStarted","Data":"b7be82351d99f931b79e512b13c2ed448abfad0e9e2efeb887e753115cd22e5e"} Feb 16 11:30:48 crc kubenswrapper[4949]: I0216 11:30:48.771526 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84b966f6c9-blz5m" event={"ID":"24bc9e69-eda6-45c5-b43a-0dbe44f2d12d","Type":"ContainerStarted","Data":"3639e576777fbe13a9beac39844bc62e1db9a3f2fd222c8cc3684f8e1eb0214e"} Feb 16 11:30:48 crc kubenswrapper[4949]: I0216 11:30:48.773900 
4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"495daabb-a227-4235-ace3-6caae6936da4","Type":"ContainerStarted","Data":"fb297072926614665cadf16825deea23583fd91df765c6b2ce82fa6b5dec5a36"} Feb 16 11:30:48 crc kubenswrapper[4949]: I0216 11:30:48.814600 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-p69nz" podStartSLOduration=16.814575101 podStartE2EDuration="16.814575101s" podCreationTimestamp="2026-02-16 11:30:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:30:48.801596761 +0000 UTC m=+1438.430930916" watchObservedRunningTime="2026-02-16 11:30:48.814575101 +0000 UTC m=+1438.443909266" Feb 16 11:30:48 crc kubenswrapper[4949]: W0216 11:30:48.893353 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4f44c7bf_e0a8_4250_b513_147e6e88718d.slice/crio-fce5e9dea96f7b3de20adb34f3889488a5b616398a3402408b95e3a510892a06 WatchSource:0}: Error finding container fce5e9dea96f7b3de20adb34f3889488a5b616398a3402408b95e3a510892a06: Status 404 returned error can't find the container with id fce5e9dea96f7b3de20adb34f3889488a5b616398a3402408b95e3a510892a06 Feb 16 11:30:49 crc kubenswrapper[4949]: I0216 11:30:49.079678 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-74f8bcc545-j46d5"] Feb 16 11:30:49 crc kubenswrapper[4949]: I0216 11:30:49.082121 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-74f8bcc545-j46d5" Feb 16 11:30:49 crc kubenswrapper[4949]: I0216 11:30:49.086362 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Feb 16 11:30:49 crc kubenswrapper[4949]: I0216 11:30:49.091419 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Feb 16 11:30:49 crc kubenswrapper[4949]: I0216 11:30:49.117329 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-74f8bcc545-j46d5"] Feb 16 11:30:49 crc kubenswrapper[4949]: I0216 11:30:49.151541 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b7171cf3-1339-4609-bf59-f9a3777bb15c-httpd-config\") pod \"neutron-74f8bcc545-j46d5\" (UID: \"b7171cf3-1339-4609-bf59-f9a3777bb15c\") " pod="openstack/neutron-74f8bcc545-j46d5" Feb 16 11:30:49 crc kubenswrapper[4949]: I0216 11:30:49.151634 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b7171cf3-1339-4609-bf59-f9a3777bb15c-ovndb-tls-certs\") pod \"neutron-74f8bcc545-j46d5\" (UID: \"b7171cf3-1339-4609-bf59-f9a3777bb15c\") " pod="openstack/neutron-74f8bcc545-j46d5" Feb 16 11:30:49 crc kubenswrapper[4949]: I0216 11:30:49.151678 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b7171cf3-1339-4609-bf59-f9a3777bb15c-internal-tls-certs\") pod \"neutron-74f8bcc545-j46d5\" (UID: \"b7171cf3-1339-4609-bf59-f9a3777bb15c\") " pod="openstack/neutron-74f8bcc545-j46d5" Feb 16 11:30:49 crc kubenswrapper[4949]: I0216 11:30:49.151742 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"config\" (UniqueName: \"kubernetes.io/secret/b7171cf3-1339-4609-bf59-f9a3777bb15c-config\") pod \"neutron-74f8bcc545-j46d5\" (UID: \"b7171cf3-1339-4609-bf59-f9a3777bb15c\") " pod="openstack/neutron-74f8bcc545-j46d5" Feb 16 11:30:49 crc kubenswrapper[4949]: I0216 11:30:49.151786 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7nfp9\" (UniqueName: \"kubernetes.io/projected/b7171cf3-1339-4609-bf59-f9a3777bb15c-kube-api-access-7nfp9\") pod \"neutron-74f8bcc545-j46d5\" (UID: \"b7171cf3-1339-4609-bf59-f9a3777bb15c\") " pod="openstack/neutron-74f8bcc545-j46d5" Feb 16 11:30:49 crc kubenswrapper[4949]: I0216 11:30:49.151889 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b7171cf3-1339-4609-bf59-f9a3777bb15c-public-tls-certs\") pod \"neutron-74f8bcc545-j46d5\" (UID: \"b7171cf3-1339-4609-bf59-f9a3777bb15c\") " pod="openstack/neutron-74f8bcc545-j46d5" Feb 16 11:30:49 crc kubenswrapper[4949]: I0216 11:30:49.151980 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7171cf3-1339-4609-bf59-f9a3777bb15c-combined-ca-bundle\") pod \"neutron-74f8bcc545-j46d5\" (UID: \"b7171cf3-1339-4609-bf59-f9a3777bb15c\") " pod="openstack/neutron-74f8bcc545-j46d5" Feb 16 11:30:49 crc kubenswrapper[4949]: I0216 11:30:49.254125 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b7171cf3-1339-4609-bf59-f9a3777bb15c-httpd-config\") pod \"neutron-74f8bcc545-j46d5\" (UID: \"b7171cf3-1339-4609-bf59-f9a3777bb15c\") " pod="openstack/neutron-74f8bcc545-j46d5" Feb 16 11:30:49 crc kubenswrapper[4949]: I0216 11:30:49.254221 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b7171cf3-1339-4609-bf59-f9a3777bb15c-ovndb-tls-certs\") pod \"neutron-74f8bcc545-j46d5\" (UID: \"b7171cf3-1339-4609-bf59-f9a3777bb15c\") " pod="openstack/neutron-74f8bcc545-j46d5" Feb 16 11:30:49 crc kubenswrapper[4949]: I0216 11:30:49.254255 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b7171cf3-1339-4609-bf59-f9a3777bb15c-internal-tls-certs\") pod \"neutron-74f8bcc545-j46d5\" (UID: \"b7171cf3-1339-4609-bf59-f9a3777bb15c\") " pod="openstack/neutron-74f8bcc545-j46d5" Feb 16 11:30:49 crc kubenswrapper[4949]: I0216 11:30:49.254310 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/b7171cf3-1339-4609-bf59-f9a3777bb15c-config\") pod \"neutron-74f8bcc545-j46d5\" (UID: \"b7171cf3-1339-4609-bf59-f9a3777bb15c\") " pod="openstack/neutron-74f8bcc545-j46d5" Feb 16 11:30:49 crc kubenswrapper[4949]: I0216 11:30:49.254346 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7nfp9\" (UniqueName: \"kubernetes.io/projected/b7171cf3-1339-4609-bf59-f9a3777bb15c-kube-api-access-7nfp9\") pod \"neutron-74f8bcc545-j46d5\" (UID: \"b7171cf3-1339-4609-bf59-f9a3777bb15c\") " pod="openstack/neutron-74f8bcc545-j46d5" Feb 16 11:30:49 crc kubenswrapper[4949]: I0216 11:30:49.254425 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/b7171cf3-1339-4609-bf59-f9a3777bb15c-public-tls-certs\") pod \"neutron-74f8bcc545-j46d5\" (UID: \"b7171cf3-1339-4609-bf59-f9a3777bb15c\") " pod="openstack/neutron-74f8bcc545-j46d5" Feb 16 11:30:49 crc kubenswrapper[4949]: I0216 11:30:49.254498 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7171cf3-1339-4609-bf59-f9a3777bb15c-combined-ca-bundle\") pod \"neutron-74f8bcc545-j46d5\" (UID: \"b7171cf3-1339-4609-bf59-f9a3777bb15c\") " pod="openstack/neutron-74f8bcc545-j46d5" Feb 16 11:30:49 crc kubenswrapper[4949]: I0216 11:30:49.267437 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b7171cf3-1339-4609-bf59-f9a3777bb15c-httpd-config\") pod \"neutron-74f8bcc545-j46d5\" (UID: \"b7171cf3-1339-4609-bf59-f9a3777bb15c\") " pod="openstack/neutron-74f8bcc545-j46d5" Feb 16 11:30:49 crc kubenswrapper[4949]: I0216 11:30:49.271703 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b7171cf3-1339-4609-bf59-f9a3777bb15c-internal-tls-certs\") pod \"neutron-74f8bcc545-j46d5\" (UID: \"b7171cf3-1339-4609-bf59-f9a3777bb15c\") " pod="openstack/neutron-74f8bcc545-j46d5" Feb 16 11:30:49 crc kubenswrapper[4949]: I0216 11:30:49.271733 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b7171cf3-1339-4609-bf59-f9a3777bb15c-public-tls-certs\") pod \"neutron-74f8bcc545-j46d5\" (UID: \"b7171cf3-1339-4609-bf59-f9a3777bb15c\") " pod="openstack/neutron-74f8bcc545-j46d5" Feb 16 11:30:49 crc kubenswrapper[4949]: I0216 11:30:49.272817 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7171cf3-1339-4609-bf59-f9a3777bb15c-combined-ca-bundle\") pod \"neutron-74f8bcc545-j46d5\" (UID: \"b7171cf3-1339-4609-bf59-f9a3777bb15c\") " pod="openstack/neutron-74f8bcc545-j46d5" Feb 16 11:30:49 crc kubenswrapper[4949]: I0216 11:30:49.277410 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b7171cf3-1339-4609-bf59-f9a3777bb15c-ovndb-tls-certs\") pod \"neutron-74f8bcc545-j46d5\" (UID: \"b7171cf3-1339-4609-bf59-f9a3777bb15c\") " pod="openstack/neutron-74f8bcc545-j46d5" Feb 16 11:30:49 crc kubenswrapper[4949]: I0216 11:30:49.286778 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/b7171cf3-1339-4609-bf59-f9a3777bb15c-config\") pod \"neutron-74f8bcc545-j46d5\" (UID: \"b7171cf3-1339-4609-bf59-f9a3777bb15c\") " pod="openstack/neutron-74f8bcc545-j46d5" Feb 16 11:30:49 crc kubenswrapper[4949]: I0216 11:30:49.287164 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7nfp9\" (UniqueName: \"kubernetes.io/projected/b7171cf3-1339-4609-bf59-f9a3777bb15c-kube-api-access-7nfp9\") pod \"neutron-74f8bcc545-j46d5\" (UID: \"b7171cf3-1339-4609-bf59-f9a3777bb15c\") " pod="openstack/neutron-74f8bcc545-j46d5" Feb 16 11:30:49 crc kubenswrapper[4949]: I0216 11:30:49.430568 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-74f8bcc545-j46d5" Feb 16 11:30:49 crc kubenswrapper[4949]: I0216 11:30:49.791289 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"495daabb-a227-4235-ace3-6caae6936da4","Type":"ContainerStarted","Data":"f3bf7647a6b8f333884c019d6fe52e91fe985607e23774e732324644cc5a338e"} Feb 16 11:30:49 crc kubenswrapper[4949]: I0216 11:30:49.797427 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7f6ff59f84-j4tfn" event={"ID":"4f44c7bf-e0a8-4250-b513-147e6e88718d","Type":"ContainerStarted","Data":"093358e6b634260643a762f42b27824e10a12260cc9d2a87fa7d0d7057ca5c34"} Feb 16 11:30:49 crc kubenswrapper[4949]: I0216 11:30:49.797460 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7f6ff59f84-j4tfn" event={"ID":"4f44c7bf-e0a8-4250-b513-147e6e88718d","Type":"ContainerStarted","Data":"fce5e9dea96f7b3de20adb34f3889488a5b616398a3402408b95e3a510892a06"} Feb 16 11:30:49 crc kubenswrapper[4949]: I0216 11:30:49.801328 4949 generic.go:334] "Generic (PLEG): container finished" podID="24bc9e69-eda6-45c5-b43a-0dbe44f2d12d" containerID="aaaaf2bb9720c1477b6b7377b6ac9a845bb97e95f9e4441032d5fb4fa6166b6c" exitCode=0 Feb 16 11:30:49 crc kubenswrapper[4949]: I0216 11:30:49.803311 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84b966f6c9-blz5m" event={"ID":"24bc9e69-eda6-45c5-b43a-0dbe44f2d12d","Type":"ContainerDied","Data":"aaaaf2bb9720c1477b6b7377b6ac9a845bb97e95f9e4441032d5fb4fa6166b6c"} Feb 16 11:30:50 crc kubenswrapper[4949]: I0216 11:30:50.051448 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-7ff5475cc9-wl8jh" podUID="6c60c38e-2850-474f-85f8-2bd436299ebe" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.167:5353: i/o timeout" Feb 16 11:30:50 crc kubenswrapper[4949]: I0216 11:30:50.160956 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-74f8bcc545-j46d5"] Feb 16 11:30:50 crc kubenswrapper[4949]: I0216 11:30:50.822084 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c59b957e-c5f8-463f-8228-1051225f5140","Type":"ContainerStarted","Data":"8eaa87aadd7b317f610cc7d3050e7b6dc48d4248736b9228780b0c0df54af339"} Feb 16 11:30:50 crc kubenswrapper[4949]: I0216 11:30:50.824773 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-74f8bcc545-j46d5" event={"ID":"b7171cf3-1339-4609-bf59-f9a3777bb15c","Type":"ContainerStarted","Data":"7e22823c267d9ec32b234f7b36ee28fd37b5609b771c357a521c731185076f05"} Feb 16 11:30:50 crc kubenswrapper[4949]: I0216 11:30:50.834894 4949 generic.go:334] "Generic (PLEG): container finished" podID="18a2cf62-0669-4033-ba7f-c69805fa9c3a" containerID="8777fd92ca59578d672bca13a68eaf951c8a73f222708fec25ee0073f31a7b75" exitCode=0 Feb 16 11:30:50 crc kubenswrapper[4949]: I0216 11:30:50.834969 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-n7hw7" event={"ID":"18a2cf62-0669-4033-ba7f-c69805fa9c3a","Type":"ContainerDied","Data":"8777fd92ca59578d672bca13a68eaf951c8a73f222708fec25ee0073f31a7b75"} Feb 16 11:30:50 crc kubenswrapper[4949]: I0216 11:30:50.841272 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g76dr" event={"ID":"acc263a1-4f57-4dca-bcc5-5d5388539a5d","Type":"ContainerStarted","Data":"92a955296f684feb4a5a1638bbfdec008357a86f1fc56d0721f036a71a5088dd"} Feb 16 11:30:51 
crc kubenswrapper[4949]: I0216 11:30:51.877920 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7f6ff59f84-j4tfn" event={"ID":"4f44c7bf-e0a8-4250-b513-147e6e88718d","Type":"ContainerStarted","Data":"e5c79f6a615745bee1f21cdaaa659128533899b6ee7d10afd6a96d6843731d3a"} Feb 16 11:30:51 crc kubenswrapper[4949]: I0216 11:30:51.878954 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-7f6ff59f84-j4tfn" Feb 16 11:30:51 crc kubenswrapper[4949]: I0216 11:30:51.888051 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84b966f6c9-blz5m" event={"ID":"24bc9e69-eda6-45c5-b43a-0dbe44f2d12d","Type":"ContainerStarted","Data":"9293d69619cae7dbeacdd64580a5b3ce36088a87324edcad473f241a5a539d97"} Feb 16 11:30:51 crc kubenswrapper[4949]: I0216 11:30:51.888238 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-84b966f6c9-blz5m" Feb 16 11:30:51 crc kubenswrapper[4949]: I0216 11:30:51.892829 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"495daabb-a227-4235-ace3-6caae6936da4","Type":"ContainerStarted","Data":"df915a5fb326ecb825acc3a5561fc8c91a49c4de269d1de2e19b6f78cd7f8925"} Feb 16 11:30:51 crc kubenswrapper[4949]: I0216 11:30:51.908350 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a5b65741-25f3-43db-a544-85997388cfea","Type":"ContainerStarted","Data":"69c4717876345652ee32a203179fde6dd840ec23dd97af04f5ce8ed8f39031cd"} Feb 16 11:30:51 crc kubenswrapper[4949]: I0216 11:30:51.911645 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-74f8bcc545-j46d5" event={"ID":"b7171cf3-1339-4609-bf59-f9a3777bb15c","Type":"ContainerStarted","Data":"2447ec3101884bb3a4a65d8a5bb4e65c0eea62a71a0577b852d152d4e8899067"} Feb 16 11:30:51 crc kubenswrapper[4949]: I0216 11:30:51.911704 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-74f8bcc545-j46d5" event={"ID":"b7171cf3-1339-4609-bf59-f9a3777bb15c","Type":"ContainerStarted","Data":"81542eacf05937369a012bc31d2f24fe8e1bbd60b859989367e99bfdf914d1a1"} Feb 16 11:30:51 crc kubenswrapper[4949]: I0216 11:30:51.911796 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-74f8bcc545-j46d5" Feb 16 11:30:51 crc kubenswrapper[4949]: I0216 11:30:51.914906 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-wbd7n" event={"ID":"f40c7714-8669-4c33-8b1d-e3be853ca911","Type":"ContainerStarted","Data":"410a1728f81cf8302e7f4956877b44ecb0e45ed5d28f8328a94dea7b626acbb6"} Feb 16 11:30:51 crc kubenswrapper[4949]: I0216 11:30:51.924554 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-7f6ff59f84-j4tfn" podStartSLOduration=5.924527536 podStartE2EDuration="5.924527536s" podCreationTimestamp="2026-02-16 11:30:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:30:51.900210992 +0000 UTC m=+1441.529545157" watchObservedRunningTime="2026-02-16 11:30:51.924527536 +0000 UTC m=+1441.553861701" Feb 16 11:30:51 crc kubenswrapper[4949]: I0216 11:30:51.926335 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-84b966f6c9-blz5m" podStartSLOduration=5.926323497 podStartE2EDuration="5.926323497s" podCreationTimestamp="2026-02-16 11:30:46 
+0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:30:51.923589399 +0000 UTC m=+1441.552923594" watchObservedRunningTime="2026-02-16 11:30:51.926323497 +0000 UTC m=+1441.555657662" Feb 16 11:30:51 crc kubenswrapper[4949]: I0216 11:30:51.978350 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-wbd7n" podStartSLOduration=3.265432847 podStartE2EDuration="34.978328071s" podCreationTimestamp="2026-02-16 11:30:17 +0000 UTC" firstStartedPulling="2026-02-16 11:30:19.757721916 +0000 UTC m=+1409.387056081" lastFinishedPulling="2026-02-16 11:30:51.47061714 +0000 UTC m=+1441.099951305" observedRunningTime="2026-02-16 11:30:51.974949245 +0000 UTC m=+1441.604283410" watchObservedRunningTime="2026-02-16 11:30:51.978328071 +0000 UTC m=+1441.607662236" Feb 16 11:30:51 crc kubenswrapper[4949]: I0216 11:30:51.987849 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=17.987825323 podStartE2EDuration="17.987825323s" podCreationTimestamp="2026-02-16 11:30:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:30:51.960709649 +0000 UTC m=+1441.590043804" watchObservedRunningTime="2026-02-16 11:30:51.987825323 +0000 UTC m=+1441.617159488" Feb 16 11:30:52 crc kubenswrapper[4949]: I0216 11:30:52.093159 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-74f8bcc545-j46d5" podStartSLOduration=3.093120118 podStartE2EDuration="3.093120118s" podCreationTimestamp="2026-02-16 11:30:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:30:52.0136679 +0000 UTC m=+1441.643002065" watchObservedRunningTime="2026-02-16 11:30:52.093120118 +0000 UTC m=+1441.722454283" Feb 16 11:30:52 crc kubenswrapper[4949]: I0216 11:30:52.103059 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=18.10301429 podStartE2EDuration="18.10301429s" podCreationTimestamp="2026-02-16 11:30:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:30:52.048608827 +0000 UTC m=+1441.677942992" watchObservedRunningTime="2026-02-16 11:30:52.10301429 +0000 UTC m=+1441.732348455" Feb 16 11:30:52 crc kubenswrapper[4949]: I0216 11:30:52.567577 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-n7hw7" Feb 16 11:30:52 crc kubenswrapper[4949]: I0216 11:30:52.696146 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18a2cf62-0669-4033-ba7f-c69805fa9c3a-config-data\") pod \"18a2cf62-0669-4033-ba7f-c69805fa9c3a\" (UID: \"18a2cf62-0669-4033-ba7f-c69805fa9c3a\") " Feb 16 11:30:52 crc kubenswrapper[4949]: I0216 11:30:52.696268 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/18a2cf62-0669-4033-ba7f-c69805fa9c3a-logs\") pod \"18a2cf62-0669-4033-ba7f-c69805fa9c3a\" (UID: \"18a2cf62-0669-4033-ba7f-c69805fa9c3a\") " Feb 16 11:30:52 crc kubenswrapper[4949]: I0216 11:30:52.696484 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18a2cf62-0669-4033-ba7f-c69805fa9c3a-combined-ca-bundle\") pod \"18a2cf62-0669-4033-ba7f-c69805fa9c3a\" (UID: \"18a2cf62-0669-4033-ba7f-c69805fa9c3a\") " Feb 16 11:30:52 crc kubenswrapper[4949]: I0216 11:30:52.696524 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/18a2cf62-0669-4033-ba7f-c69805fa9c3a-scripts\") pod \"18a2cf62-0669-4033-ba7f-c69805fa9c3a\" (UID: \"18a2cf62-0669-4033-ba7f-c69805fa9c3a\") " Feb 16 11:30:52 crc kubenswrapper[4949]: I0216 11:30:52.696717 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rvh7x\" (UniqueName: \"kubernetes.io/projected/18a2cf62-0669-4033-ba7f-c69805fa9c3a-kube-api-access-rvh7x\") pod \"18a2cf62-0669-4033-ba7f-c69805fa9c3a\" (UID: \"18a2cf62-0669-4033-ba7f-c69805fa9c3a\") " Feb 16 11:30:52 crc kubenswrapper[4949]: I0216 11:30:52.696650 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/18a2cf62-0669-4033-ba7f-c69805fa9c3a-logs" (OuterVolumeSpecName: "logs") pod "18a2cf62-0669-4033-ba7f-c69805fa9c3a" (UID: "18a2cf62-0669-4033-ba7f-c69805fa9c3a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:30:52 crc kubenswrapper[4949]: I0216 11:30:52.697525 4949 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/18a2cf62-0669-4033-ba7f-c69805fa9c3a-logs\") on node \"crc\" DevicePath \"\"" Feb 16 11:30:52 crc kubenswrapper[4949]: I0216 11:30:52.704482 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/18a2cf62-0669-4033-ba7f-c69805fa9c3a-kube-api-access-rvh7x" (OuterVolumeSpecName: "kube-api-access-rvh7x") pod "18a2cf62-0669-4033-ba7f-c69805fa9c3a" (UID: "18a2cf62-0669-4033-ba7f-c69805fa9c3a"). InnerVolumeSpecName "kube-api-access-rvh7x". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:30:52 crc kubenswrapper[4949]: I0216 11:30:52.731155 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18a2cf62-0669-4033-ba7f-c69805fa9c3a-scripts" (OuterVolumeSpecName: "scripts") pod "18a2cf62-0669-4033-ba7f-c69805fa9c3a" (UID: "18a2cf62-0669-4033-ba7f-c69805fa9c3a"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:30:52 crc kubenswrapper[4949]: I0216 11:30:52.750325 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18a2cf62-0669-4033-ba7f-c69805fa9c3a-config-data" (OuterVolumeSpecName: "config-data") pod "18a2cf62-0669-4033-ba7f-c69805fa9c3a" (UID: "18a2cf62-0669-4033-ba7f-c69805fa9c3a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:30:52 crc kubenswrapper[4949]: I0216 11:30:52.767545 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18a2cf62-0669-4033-ba7f-c69805fa9c3a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "18a2cf62-0669-4033-ba7f-c69805fa9c3a" (UID: "18a2cf62-0669-4033-ba7f-c69805fa9c3a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:30:52 crc kubenswrapper[4949]: I0216 11:30:52.799873 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18a2cf62-0669-4033-ba7f-c69805fa9c3a-config-data\") on node \"crc\" DevicePath \"\"" Feb 16 11:30:52 crc kubenswrapper[4949]: I0216 11:30:52.800239 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18a2cf62-0669-4033-ba7f-c69805fa9c3a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:30:52 crc kubenswrapper[4949]: I0216 11:30:52.800254 4949 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/18a2cf62-0669-4033-ba7f-c69805fa9c3a-scripts\") on node \"crc\" DevicePath \"\"" Feb 16 11:30:52 crc kubenswrapper[4949]: I0216 11:30:52.800263 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rvh7x\" (UniqueName: \"kubernetes.io/projected/18a2cf62-0669-4033-ba7f-c69805fa9c3a-kube-api-access-rvh7x\") on node \"crc\" DevicePath \"\"" Feb 16 11:30:52 crc kubenswrapper[4949]: I0216 11:30:52.932990 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-n7hw7" Feb 16 11:30:52 crc kubenswrapper[4949]: I0216 11:30:52.933022 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-n7hw7" event={"ID":"18a2cf62-0669-4033-ba7f-c69805fa9c3a","Type":"ContainerDied","Data":"49d854202314dbf79b0dc3bed0f8068dfd54e14921f114b604abf3de2de135d6"} Feb 16 11:30:52 crc kubenswrapper[4949]: I0216 11:30:52.933109 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="49d854202314dbf79b0dc3bed0f8068dfd54e14921f114b604abf3de2de135d6" Feb 16 11:30:52 crc kubenswrapper[4949]: I0216 11:30:52.942861 4949 generic.go:334] "Generic (PLEG): container finished" podID="acc263a1-4f57-4dca-bcc5-5d5388539a5d" containerID="92a955296f684feb4a5a1638bbfdec008357a86f1fc56d0721f036a71a5088dd" exitCode=0 Feb 16 11:30:52 crc kubenswrapper[4949]: I0216 11:30:52.942958 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g76dr" event={"ID":"acc263a1-4f57-4dca-bcc5-5d5388539a5d","Type":"ContainerDied","Data":"92a955296f684feb4a5a1638bbfdec008357a86f1fc56d0721f036a71a5088dd"} Feb 16 11:30:52 crc kubenswrapper[4949]: I0216 11:30:52.985131 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-57d8c9758b-hz957"] Feb 16 11:30:52 crc kubenswrapper[4949]: E0216 11:30:52.986041 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="18a2cf62-0669-4033-ba7f-c69805fa9c3a" containerName="placement-db-sync" Feb 16 11:30:52 crc kubenswrapper[4949]: I0216 11:30:52.986072 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="18a2cf62-0669-4033-ba7f-c69805fa9c3a" containerName="placement-db-sync" Feb 16 11:30:52 crc kubenswrapper[4949]: I0216 11:30:52.986355 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="18a2cf62-0669-4033-ba7f-c69805fa9c3a" containerName="placement-db-sync" Feb 16 11:30:52 crc kubenswrapper[4949]: I0216 11:30:52.988258 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-57d8c9758b-hz957" Feb 16 11:30:53 crc kubenswrapper[4949]: I0216 11:30:53.001277 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Feb 16 11:30:53 crc kubenswrapper[4949]: I0216 11:30:53.001280 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Feb 16 11:30:53 crc kubenswrapper[4949]: I0216 11:30:53.001574 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-twm6t" Feb 16 11:30:53 crc kubenswrapper[4949]: I0216 11:30:53.001677 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Feb 16 11:30:53 crc kubenswrapper[4949]: I0216 11:30:53.002259 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Feb 16 11:30:53 crc kubenswrapper[4949]: I0216 11:30:53.043018 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-57d8c9758b-hz957"] Feb 16 11:30:53 crc kubenswrapper[4949]: I0216 11:30:53.107484 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74cdae97-ce6a-4653-a84d-6f46d9795fbb-combined-ca-bundle\") pod \"placement-57d8c9758b-hz957\" (UID: \"74cdae97-ce6a-4653-a84d-6f46d9795fbb\") " pod="openstack/placement-57d8c9758b-hz957" Feb 16 11:30:53 crc kubenswrapper[4949]: I0216 11:30:53.107607 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/74cdae97-ce6a-4653-a84d-6f46d9795fbb-internal-tls-certs\") pod \"placement-57d8c9758b-hz957\" (UID: \"74cdae97-ce6a-4653-a84d-6f46d9795fbb\") " pod="openstack/placement-57d8c9758b-hz957" Feb 16 11:30:53 crc kubenswrapper[4949]: I0216 11:30:53.108157 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hkxb7\" (UniqueName: \"kubernetes.io/projected/74cdae97-ce6a-4653-a84d-6f46d9795fbb-kube-api-access-hkxb7\") pod \"placement-57d8c9758b-hz957\" (UID: \"74cdae97-ce6a-4653-a84d-6f46d9795fbb\") " pod="openstack/placement-57d8c9758b-hz957" Feb 16 11:30:53 crc kubenswrapper[4949]: I0216 11:30:53.108328 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/74cdae97-ce6a-4653-a84d-6f46d9795fbb-public-tls-certs\") pod \"placement-57d8c9758b-hz957\" (UID: \"74cdae97-ce6a-4653-a84d-6f46d9795fbb\") " pod="openstack/placement-57d8c9758b-hz957" Feb 16 11:30:53 crc kubenswrapper[4949]: I0216 11:30:53.108697 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74cdae97-ce6a-4653-a84d-6f46d9795fbb-config-data\") pod \"placement-57d8c9758b-hz957\" (UID: \"74cdae97-ce6a-4653-a84d-6f46d9795fbb\") " pod="openstack/placement-57d8c9758b-hz957" Feb 16 11:30:53 crc kubenswrapper[4949]: I0216 11:30:53.108918 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/74cdae97-ce6a-4653-a84d-6f46d9795fbb-logs\") pod \"placement-57d8c9758b-hz957\" (UID: \"74cdae97-ce6a-4653-a84d-6f46d9795fbb\") " pod="openstack/placement-57d8c9758b-hz957" Feb 16 11:30:53 crc kubenswrapper[4949]: I0216 
11:30:53.109014 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/74cdae97-ce6a-4653-a84d-6f46d9795fbb-scripts\") pod \"placement-57d8c9758b-hz957\" (UID: \"74cdae97-ce6a-4653-a84d-6f46d9795fbb\") " pod="openstack/placement-57d8c9758b-hz957" Feb 16 11:30:53 crc kubenswrapper[4949]: I0216 11:30:53.211634 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74cdae97-ce6a-4653-a84d-6f46d9795fbb-combined-ca-bundle\") pod \"placement-57d8c9758b-hz957\" (UID: \"74cdae97-ce6a-4653-a84d-6f46d9795fbb\") " pod="openstack/placement-57d8c9758b-hz957" Feb 16 11:30:53 crc kubenswrapper[4949]: I0216 11:30:53.211703 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/74cdae97-ce6a-4653-a84d-6f46d9795fbb-internal-tls-certs\") pod \"placement-57d8c9758b-hz957\" (UID: \"74cdae97-ce6a-4653-a84d-6f46d9795fbb\") " pod="openstack/placement-57d8c9758b-hz957" Feb 16 11:30:53 crc kubenswrapper[4949]: I0216 11:30:53.211734 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hkxb7\" (UniqueName: \"kubernetes.io/projected/74cdae97-ce6a-4653-a84d-6f46d9795fbb-kube-api-access-hkxb7\") pod \"placement-57d8c9758b-hz957\" (UID: \"74cdae97-ce6a-4653-a84d-6f46d9795fbb\") " pod="openstack/placement-57d8c9758b-hz957" Feb 16 11:30:53 crc kubenswrapper[4949]: I0216 11:30:53.211771 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/74cdae97-ce6a-4653-a84d-6f46d9795fbb-public-tls-certs\") pod \"placement-57d8c9758b-hz957\" (UID: \"74cdae97-ce6a-4653-a84d-6f46d9795fbb\") " pod="openstack/placement-57d8c9758b-hz957" Feb 16 11:30:53 crc kubenswrapper[4949]: I0216 11:30:53.211858 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74cdae97-ce6a-4653-a84d-6f46d9795fbb-config-data\") pod \"placement-57d8c9758b-hz957\" (UID: \"74cdae97-ce6a-4653-a84d-6f46d9795fbb\") " pod="openstack/placement-57d8c9758b-hz957" Feb 16 11:30:53 crc kubenswrapper[4949]: I0216 11:30:53.211908 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/74cdae97-ce6a-4653-a84d-6f46d9795fbb-logs\") pod \"placement-57d8c9758b-hz957\" (UID: \"74cdae97-ce6a-4653-a84d-6f46d9795fbb\") " pod="openstack/placement-57d8c9758b-hz957" Feb 16 11:30:53 crc kubenswrapper[4949]: I0216 11:30:53.211940 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/74cdae97-ce6a-4653-a84d-6f46d9795fbb-scripts\") pod \"placement-57d8c9758b-hz957\" (UID: \"74cdae97-ce6a-4653-a84d-6f46d9795fbb\") " pod="openstack/placement-57d8c9758b-hz957" Feb 16 11:30:53 crc kubenswrapper[4949]: I0216 11:30:53.212651 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/74cdae97-ce6a-4653-a84d-6f46d9795fbb-logs\") pod \"placement-57d8c9758b-hz957\" (UID: \"74cdae97-ce6a-4653-a84d-6f46d9795fbb\") " pod="openstack/placement-57d8c9758b-hz957" Feb 16 11:30:53 crc kubenswrapper[4949]: I0216 11:30:53.218522 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/74cdae97-ce6a-4653-a84d-6f46d9795fbb-public-tls-certs\") pod \"placement-57d8c9758b-hz957\" (UID: \"74cdae97-ce6a-4653-a84d-6f46d9795fbb\") " pod="openstack/placement-57d8c9758b-hz957" Feb 16 11:30:53 crc kubenswrapper[4949]: I0216 11:30:53.218592 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74cdae97-ce6a-4653-a84d-6f46d9795fbb-combined-ca-bundle\") pod \"placement-57d8c9758b-hz957\" (UID: \"74cdae97-ce6a-4653-a84d-6f46d9795fbb\") " pod="openstack/placement-57d8c9758b-hz957" Feb 16 11:30:53 crc kubenswrapper[4949]: I0216 11:30:53.218838 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/74cdae97-ce6a-4653-a84d-6f46d9795fbb-scripts\") pod \"placement-57d8c9758b-hz957\" (UID: \"74cdae97-ce6a-4653-a84d-6f46d9795fbb\") " pod="openstack/placement-57d8c9758b-hz957" Feb 16 11:30:53 crc kubenswrapper[4949]: I0216 11:30:53.225929 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/74cdae97-ce6a-4653-a84d-6f46d9795fbb-internal-tls-certs\") pod \"placement-57d8c9758b-hz957\" (UID: \"74cdae97-ce6a-4653-a84d-6f46d9795fbb\") " pod="openstack/placement-57d8c9758b-hz957" Feb 16 11:30:53 crc kubenswrapper[4949]: I0216 11:30:53.232338 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74cdae97-ce6a-4653-a84d-6f46d9795fbb-config-data\") pod \"placement-57d8c9758b-hz957\" (UID: \"74cdae97-ce6a-4653-a84d-6f46d9795fbb\") " pod="openstack/placement-57d8c9758b-hz957" Feb 16 11:30:53 crc kubenswrapper[4949]: I0216 11:30:53.233811 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hkxb7\" (UniqueName: \"kubernetes.io/projected/74cdae97-ce6a-4653-a84d-6f46d9795fbb-kube-api-access-hkxb7\") pod \"placement-57d8c9758b-hz957\" (UID: \"74cdae97-ce6a-4653-a84d-6f46d9795fbb\") " pod="openstack/placement-57d8c9758b-hz957" Feb 16 11:30:53 crc kubenswrapper[4949]: I0216 11:30:53.347534 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-57d8c9758b-hz957" Feb 16 11:30:53 crc kubenswrapper[4949]: I0216 11:30:53.958438 4949 generic.go:334] "Generic (PLEG): container finished" podID="89c4baaa-f449-4fab-a513-1eec4a163af9" containerID="b7be82351d99f931b79e512b13c2ed448abfad0e9e2efeb887e753115cd22e5e" exitCode=0 Feb 16 11:30:53 crc kubenswrapper[4949]: I0216 11:30:53.958513 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-p69nz" event={"ID":"89c4baaa-f449-4fab-a513-1eec4a163af9","Type":"ContainerDied","Data":"b7be82351d99f931b79e512b13c2ed448abfad0e9e2efeb887e753115cd22e5e"} Feb 16 11:30:54 crc kubenswrapper[4949]: I0216 11:30:54.973906 4949 generic.go:334] "Generic (PLEG): container finished" podID="667080fb-b428-4b48-87c9-a955ff09771a" containerID="0d8e0dfc7ed2a7a584158a61c7a84d8168b4e7cce323bf8d92f2ace421633ffc" exitCode=0 Feb 16 11:30:54 crc kubenswrapper[4949]: I0216 11:30:54.974151 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-ztzjs" event={"ID":"667080fb-b428-4b48-87c9-a955ff09771a","Type":"ContainerDied","Data":"0d8e0dfc7ed2a7a584158a61c7a84d8168b4e7cce323bf8d92f2ace421633ffc"} Feb 16 11:30:55 crc kubenswrapper[4949]: I0216 11:30:55.198239 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Feb 16 11:30:55 crc kubenswrapper[4949]: I0216 11:30:55.198301 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Feb 16 11:30:55 crc kubenswrapper[4949]: I0216 11:30:55.253846 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Feb 16 11:30:55 crc kubenswrapper[4949]: I0216 11:30:55.253933 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Feb 16 11:30:55 crc kubenswrapper[4949]: I0216 11:30:55.275916 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Feb 16 11:30:55 crc kubenswrapper[4949]: I0216 11:30:55.275971 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Feb 16 11:30:55 crc kubenswrapper[4949]: I0216 11:30:55.327240 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Feb 16 11:30:55 crc kubenswrapper[4949]: I0216 11:30:55.336020 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Feb 16 11:30:55 crc kubenswrapper[4949]: I0216 11:30:55.991536 4949 generic.go:334] "Generic (PLEG): container finished" podID="f40c7714-8669-4c33-8b1d-e3be853ca911" containerID="410a1728f81cf8302e7f4956877b44ecb0e45ed5d28f8328a94dea7b626acbb6" exitCode=0 Feb 16 11:30:55 crc kubenswrapper[4949]: I0216 11:30:55.991580 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-wbd7n" event={"ID":"f40c7714-8669-4c33-8b1d-e3be853ca911","Type":"ContainerDied","Data":"410a1728f81cf8302e7f4956877b44ecb0e45ed5d28f8328a94dea7b626acbb6"} Feb 16 11:30:55 crc kubenswrapper[4949]: I0216 11:30:55.994211 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Feb 16 11:30:55 crc kubenswrapper[4949]: I0216 11:30:55.994260 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack/glance-default-internal-api-0" Feb 16 11:30:55 crc kubenswrapper[4949]: I0216 11:30:55.994276 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Feb 16 11:30:55 crc kubenswrapper[4949]: I0216 11:30:55.994286 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Feb 16 11:30:57 crc kubenswrapper[4949]: I0216 11:30:57.006873 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-p69nz" event={"ID":"89c4baaa-f449-4fab-a513-1eec4a163af9","Type":"ContainerDied","Data":"7b437b6abbb245057e3bc21b728c6ec9ad2a89f2aeade4c274af45b1d0535f97"} Feb 16 11:30:57 crc kubenswrapper[4949]: I0216 11:30:57.007115 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7b437b6abbb245057e3bc21b728c6ec9ad2a89f2aeade4c274af45b1d0535f97" Feb 16 11:30:57 crc kubenswrapper[4949]: I0216 11:30:57.011975 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-ztzjs" event={"ID":"667080fb-b428-4b48-87c9-a955ff09771a","Type":"ContainerDied","Data":"adf7eb23d6ff1b3006ba4d52003ea441cb4033bf313be33c9ac72cf9293583c9"} Feb 16 11:30:57 crc kubenswrapper[4949]: I0216 11:30:57.012028 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="adf7eb23d6ff1b3006ba4d52003ea441cb4033bf313be33c9ac72cf9293583c9" Feb 16 11:30:57 crc kubenswrapper[4949]: I0216 11:30:57.022390 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-84b966f6c9-blz5m" Feb 16 11:30:57 crc kubenswrapper[4949]: I0216 11:30:57.112506 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8b5c85b87-vhjrk"] Feb 16 11:30:57 crc kubenswrapper[4949]: I0216 11:30:57.112775 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-8b5c85b87-vhjrk" podUID="7eac8d6e-e079-41ea-83cb-d4e3a553db13" containerName="dnsmasq-dns" containerID="cri-o://4755f11655c6804f93135f9e28d881f0998c90db8faf1f8da415f625b1fe096c" gracePeriod=10 Feb 16 11:30:57 crc kubenswrapper[4949]: I0216 11:30:57.239705 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-ztzjs" Feb 16 11:30:57 crc kubenswrapper[4949]: I0216 11:30:57.308695 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-p69nz" Feb 16 11:30:57 crc kubenswrapper[4949]: I0216 11:30:57.333211 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rqq8v\" (UniqueName: \"kubernetes.io/projected/667080fb-b428-4b48-87c9-a955ff09771a-kube-api-access-rqq8v\") pod \"667080fb-b428-4b48-87c9-a955ff09771a\" (UID: \"667080fb-b428-4b48-87c9-a955ff09771a\") " Feb 16 11:30:57 crc kubenswrapper[4949]: I0216 11:30:57.333810 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/667080fb-b428-4b48-87c9-a955ff09771a-combined-ca-bundle\") pod \"667080fb-b428-4b48-87c9-a955ff09771a\" (UID: \"667080fb-b428-4b48-87c9-a955ff09771a\") " Feb 16 11:30:57 crc kubenswrapper[4949]: I0216 11:30:57.333849 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/667080fb-b428-4b48-87c9-a955ff09771a-config-data\") pod \"667080fb-b428-4b48-87c9-a955ff09771a\" (UID: \"667080fb-b428-4b48-87c9-a955ff09771a\") " Feb 16 11:30:57 crc kubenswrapper[4949]: I0216 11:30:57.347355 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/667080fb-b428-4b48-87c9-a955ff09771a-kube-api-access-rqq8v" (OuterVolumeSpecName: "kube-api-access-rqq8v") pod "667080fb-b428-4b48-87c9-a955ff09771a" (UID: "667080fb-b428-4b48-87c9-a955ff09771a"). InnerVolumeSpecName "kube-api-access-rqq8v". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:30:57 crc kubenswrapper[4949]: I0216 11:30:57.435952 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/667080fb-b428-4b48-87c9-a955ff09771a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "667080fb-b428-4b48-87c9-a955ff09771a" (UID: "667080fb-b428-4b48-87c9-a955ff09771a"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:30:57 crc kubenswrapper[4949]: I0216 11:30:57.437630 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89c4baaa-f449-4fab-a513-1eec4a163af9-config-data\") pod \"89c4baaa-f449-4fab-a513-1eec4a163af9\" (UID: \"89c4baaa-f449-4fab-a513-1eec4a163af9\") " Feb 16 11:30:57 crc kubenswrapper[4949]: I0216 11:30:57.466492 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/89c4baaa-f449-4fab-a513-1eec4a163af9-credential-keys\") pod \"89c4baaa-f449-4fab-a513-1eec4a163af9\" (UID: \"89c4baaa-f449-4fab-a513-1eec4a163af9\") " Feb 16 11:30:57 crc kubenswrapper[4949]: I0216 11:30:57.466552 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/89c4baaa-f449-4fab-a513-1eec4a163af9-fernet-keys\") pod \"89c4baaa-f449-4fab-a513-1eec4a163af9\" (UID: \"89c4baaa-f449-4fab-a513-1eec4a163af9\") " Feb 16 11:30:57 crc kubenswrapper[4949]: I0216 11:30:57.466592 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89c4baaa-f449-4fab-a513-1eec4a163af9-combined-ca-bundle\") pod \"89c4baaa-f449-4fab-a513-1eec4a163af9\" (UID: \"89c4baaa-f449-4fab-a513-1eec4a163af9\") " Feb 16 11:30:57 crc kubenswrapper[4949]: I0216 11:30:57.466721 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/89c4baaa-f449-4fab-a513-1eec4a163af9-scripts\") pod \"89c4baaa-f449-4fab-a513-1eec4a163af9\" (UID: \"89c4baaa-f449-4fab-a513-1eec4a163af9\") " Feb 16 11:30:57 crc kubenswrapper[4949]: I0216 11:30:57.466755 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz4sf\" (UniqueName: \"kubernetes.io/projected/89c4baaa-f449-4fab-a513-1eec4a163af9-kube-api-access-lz4sf\") pod \"89c4baaa-f449-4fab-a513-1eec4a163af9\" (UID: \"89c4baaa-f449-4fab-a513-1eec4a163af9\") " Feb 16 11:30:57 crc kubenswrapper[4949]: I0216 11:30:57.468131 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/667080fb-b428-4b48-87c9-a955ff09771a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:30:57 crc kubenswrapper[4949]: I0216 11:30:57.468150 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rqq8v\" (UniqueName: \"kubernetes.io/projected/667080fb-b428-4b48-87c9-a955ff09771a-kube-api-access-rqq8v\") on node \"crc\" DevicePath \"\"" Feb 16 11:30:57 crc kubenswrapper[4949]: I0216 11:30:57.477029 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/667080fb-b428-4b48-87c9-a955ff09771a-config-data" (OuterVolumeSpecName: "config-data") pod "667080fb-b428-4b48-87c9-a955ff09771a" (UID: "667080fb-b428-4b48-87c9-a955ff09771a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:30:57 crc kubenswrapper[4949]: I0216 11:30:57.477869 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/89c4baaa-f449-4fab-a513-1eec4a163af9-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "89c4baaa-f449-4fab-a513-1eec4a163af9" (UID: "89c4baaa-f449-4fab-a513-1eec4a163af9"). InnerVolumeSpecName "credential-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:30:57 crc kubenswrapper[4949]: I0216 11:30:57.480906 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/89c4baaa-f449-4fab-a513-1eec4a163af9-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "89c4baaa-f449-4fab-a513-1eec4a163af9" (UID: "89c4baaa-f449-4fab-a513-1eec4a163af9"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:30:57 crc kubenswrapper[4949]: I0216 11:30:57.482337 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/89c4baaa-f449-4fab-a513-1eec4a163af9-scripts" (OuterVolumeSpecName: "scripts") pod "89c4baaa-f449-4fab-a513-1eec4a163af9" (UID: "89c4baaa-f449-4fab-a513-1eec4a163af9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:30:57 crc kubenswrapper[4949]: I0216 11:30:57.493428 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/89c4baaa-f449-4fab-a513-1eec4a163af9-kube-api-access-lz4sf" (OuterVolumeSpecName: "kube-api-access-lz4sf") pod "89c4baaa-f449-4fab-a513-1eec4a163af9" (UID: "89c4baaa-f449-4fab-a513-1eec4a163af9"). InnerVolumeSpecName "kube-api-access-lz4sf". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:30:57 crc kubenswrapper[4949]: I0216 11:30:57.496011 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/89c4baaa-f449-4fab-a513-1eec4a163af9-config-data" (OuterVolumeSpecName: "config-data") pod "89c4baaa-f449-4fab-a513-1eec4a163af9" (UID: "89c4baaa-f449-4fab-a513-1eec4a163af9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:30:57 crc kubenswrapper[4949]: I0216 11:30:57.553374 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/89c4baaa-f449-4fab-a513-1eec4a163af9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "89c4baaa-f449-4fab-a513-1eec4a163af9" (UID: "89c4baaa-f449-4fab-a513-1eec4a163af9"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:30:57 crc kubenswrapper[4949]: I0216 11:30:57.575861 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89c4baaa-f449-4fab-a513-1eec4a163af9-config-data\") on node \"crc\" DevicePath \"\"" Feb 16 11:30:57 crc kubenswrapper[4949]: I0216 11:30:57.575913 4949 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/89c4baaa-f449-4fab-a513-1eec4a163af9-credential-keys\") on node \"crc\" DevicePath \"\"" Feb 16 11:30:57 crc kubenswrapper[4949]: I0216 11:30:57.575925 4949 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/89c4baaa-f449-4fab-a513-1eec4a163af9-fernet-keys\") on node \"crc\" DevicePath \"\"" Feb 16 11:30:57 crc kubenswrapper[4949]: I0216 11:30:57.575934 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89c4baaa-f449-4fab-a513-1eec4a163af9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:30:57 crc kubenswrapper[4949]: I0216 11:30:57.575945 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/667080fb-b428-4b48-87c9-a955ff09771a-config-data\") on node \"crc\" DevicePath \"\"" Feb 16 11:30:57 crc kubenswrapper[4949]: I0216 11:30:57.575954 4949 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/89c4baaa-f449-4fab-a513-1eec4a163af9-scripts\") on node \"crc\" DevicePath \"\"" Feb 16 11:30:57 crc kubenswrapper[4949]: I0216 11:30:57.575962 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz4sf\" (UniqueName: \"kubernetes.io/projected/89c4baaa-f449-4fab-a513-1eec4a163af9-kube-api-access-lz4sf\") on node \"crc\" DevicePath \"\"" Feb 16 11:30:57 crc kubenswrapper[4949]: I0216 11:30:57.583978 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-wbd7n" Feb 16 11:30:57 crc kubenswrapper[4949]: I0216 11:30:57.678537 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f40c7714-8669-4c33-8b1d-e3be853ca911-combined-ca-bundle\") pod \"f40c7714-8669-4c33-8b1d-e3be853ca911\" (UID: \"f40c7714-8669-4c33-8b1d-e3be853ca911\") " Feb 16 11:30:57 crc kubenswrapper[4949]: I0216 11:30:57.678843 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/f40c7714-8669-4c33-8b1d-e3be853ca911-db-sync-config-data\") pod \"f40c7714-8669-4c33-8b1d-e3be853ca911\" (UID: \"f40c7714-8669-4c33-8b1d-e3be853ca911\") " Feb 16 11:30:57 crc kubenswrapper[4949]: I0216 11:30:57.679060 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4b99\" (UniqueName: \"kubernetes.io/projected/f40c7714-8669-4c33-8b1d-e3be853ca911-kube-api-access-x4b99\") pod \"f40c7714-8669-4c33-8b1d-e3be853ca911\" (UID: \"f40c7714-8669-4c33-8b1d-e3be853ca911\") " Feb 16 11:30:57 crc kubenswrapper[4949]: I0216 11:30:57.684392 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f40c7714-8669-4c33-8b1d-e3be853ca911-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "f40c7714-8669-4c33-8b1d-e3be853ca911" (UID: "f40c7714-8669-4c33-8b1d-e3be853ca911"). 
InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:30:57 crc kubenswrapper[4949]: I0216 11:30:57.687728 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f40c7714-8669-4c33-8b1d-e3be853ca911-kube-api-access-x4b99" (OuterVolumeSpecName: "kube-api-access-x4b99") pod "f40c7714-8669-4c33-8b1d-e3be853ca911" (UID: "f40c7714-8669-4c33-8b1d-e3be853ca911"). InnerVolumeSpecName "kube-api-access-x4b99". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:30:57 crc kubenswrapper[4949]: I0216 11:30:57.719475 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f40c7714-8669-4c33-8b1d-e3be853ca911-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f40c7714-8669-4c33-8b1d-e3be853ca911" (UID: "f40c7714-8669-4c33-8b1d-e3be853ca911"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:30:57 crc kubenswrapper[4949]: I0216 11:30:57.783012 4949 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/f40c7714-8669-4c33-8b1d-e3be853ca911-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Feb 16 11:30:57 crc kubenswrapper[4949]: I0216 11:30:57.783053 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4b99\" (UniqueName: \"kubernetes.io/projected/f40c7714-8669-4c33-8b1d-e3be853ca911-kube-api-access-x4b99\") on node \"crc\" DevicePath \"\"" Feb 16 11:30:57 crc kubenswrapper[4949]: I0216 11:30:57.783065 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f40c7714-8669-4c33-8b1d-e3be853ca911-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:30:57 crc kubenswrapper[4949]: I0216 11:30:57.837818 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-57d8c9758b-hz957"] Feb 16 11:30:57 crc kubenswrapper[4949]: W0216 11:30:57.849181 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod74cdae97_ce6a_4653_a84d_6f46d9795fbb.slice/crio-8924c6378e26a9eaf8688f6e6979bb9fc5982c30040f0bc21de580aa2bf9f5c1 WatchSource:0}: Error finding container 8924c6378e26a9eaf8688f6e6979bb9fc5982c30040f0bc21de580aa2bf9f5c1: Status 404 returned error can't find the container with id 8924c6378e26a9eaf8688f6e6979bb9fc5982c30040f0bc21de580aa2bf9f5c1 Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.060772 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-wbd7n" event={"ID":"f40c7714-8669-4c33-8b1d-e3be853ca911","Type":"ContainerDied","Data":"6bd8d3a581d0985d084f930e63c9a3ae983cd463fb1e47fce172e71e56a228f4"} Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.061103 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6bd8d3a581d0985d084f930e63c9a3ae983cd463fb1e47fce172e71e56a228f4" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.061258 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-wbd7n" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.063840 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-57d8c9758b-hz957" event={"ID":"74cdae97-ce6a-4653-a84d-6f46d9795fbb","Type":"ContainerStarted","Data":"8924c6378e26a9eaf8688f6e6979bb9fc5982c30040f0bc21de580aa2bf9f5c1"} Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.066550 4949 generic.go:334] "Generic (PLEG): container finished" podID="7eac8d6e-e079-41ea-83cb-d4e3a553db13" containerID="4755f11655c6804f93135f9e28d881f0998c90db8faf1f8da415f625b1fe096c" exitCode=0 Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.076538 4949 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.076564 4949 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.076929 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-p69nz" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.072982 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b5c85b87-vhjrk" event={"ID":"7eac8d6e-e079-41ea-83cb-d4e3a553db13","Type":"ContainerDied","Data":"4755f11655c6804f93135f9e28d881f0998c90db8faf1f8da415f625b1fe096c"} Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.077363 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-ztzjs" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.156258 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-8b5c85b87-vhjrk" podUID="7eac8d6e-e079-41ea-83cb-d4e3a553db13" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.185:5353: connect: connection refused" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.472250 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-778c8b6f4c-g5qht"] Feb 16 11:30:58 crc kubenswrapper[4949]: E0216 11:30:58.473153 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f40c7714-8669-4c33-8b1d-e3be853ca911" containerName="barbican-db-sync" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.473191 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="f40c7714-8669-4c33-8b1d-e3be853ca911" containerName="barbican-db-sync" Feb 16 11:30:58 crc kubenswrapper[4949]: E0216 11:30:58.473210 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="667080fb-b428-4b48-87c9-a955ff09771a" containerName="heat-db-sync" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.473216 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="667080fb-b428-4b48-87c9-a955ff09771a" containerName="heat-db-sync" Feb 16 11:30:58 crc kubenswrapper[4949]: E0216 11:30:58.473236 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89c4baaa-f449-4fab-a513-1eec4a163af9" containerName="keystone-bootstrap" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.473246 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="89c4baaa-f449-4fab-a513-1eec4a163af9" containerName="keystone-bootstrap" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.473545 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="f40c7714-8669-4c33-8b1d-e3be853ca911" containerName="barbican-db-sync" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.473566 4949 
memory_manager.go:354] "RemoveStaleState removing state" podUID="89c4baaa-f449-4fab-a513-1eec4a163af9" containerName="keystone-bootstrap" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.473588 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="667080fb-b428-4b48-87c9-a955ff09771a" containerName="heat-db-sync" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.474985 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-778c8b6f4c-g5qht" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.485232 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.485467 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-2kzh5" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.485596 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.526222 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-75c8ddd69c-ttzj9"] Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.528953 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75c8ddd69c-ttzj9" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.619864 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/93df10b9-3ead-478c-9dd0-d7fbd3242ddb-logs\") pod \"barbican-worker-778c8b6f4c-g5qht\" (UID: \"93df10b9-3ead-478c-9dd0-d7fbd3242ddb\") " pod="openstack/barbican-worker-778c8b6f4c-g5qht" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.619983 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/93df10b9-3ead-478c-9dd0-d7fbd3242ddb-config-data-custom\") pod \"barbican-worker-778c8b6f4c-g5qht\" (UID: \"93df10b9-3ead-478c-9dd0-d7fbd3242ddb\") " pod="openstack/barbican-worker-778c8b6f4c-g5qht" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.620067 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93df10b9-3ead-478c-9dd0-d7fbd3242ddb-config-data\") pod \"barbican-worker-778c8b6f4c-g5qht\" (UID: \"93df10b9-3ead-478c-9dd0-d7fbd3242ddb\") " pod="openstack/barbican-worker-778c8b6f4c-g5qht" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.620203 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93df10b9-3ead-478c-9dd0-d7fbd3242ddb-combined-ca-bundle\") pod \"barbican-worker-778c8b6f4c-g5qht\" (UID: \"93df10b9-3ead-478c-9dd0-d7fbd3242ddb\") " pod="openstack/barbican-worker-778c8b6f4c-g5qht" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.620227 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9vj2d\" (UniqueName: \"kubernetes.io/projected/93df10b9-3ead-478c-9dd0-d7fbd3242ddb-kube-api-access-9vj2d\") pod \"barbican-worker-778c8b6f4c-g5qht\" (UID: \"93df10b9-3ead-478c-9dd0-d7fbd3242ddb\") " pod="openstack/barbican-worker-778c8b6f4c-g5qht" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.637746 4949 kubelet.go:2421] 
"SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-59d764b56d-xgwkg"] Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.640025 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-59d764b56d-xgwkg" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.642423 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.693860 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-59d764b56d-xgwkg"] Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.724015 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-75c8ddd69c-ttzj9"] Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.727391 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/93df10b9-3ead-478c-9dd0-d7fbd3242ddb-config-data-custom\") pod \"barbican-worker-778c8b6f4c-g5qht\" (UID: \"93df10b9-3ead-478c-9dd0-d7fbd3242ddb\") " pod="openstack/barbican-worker-778c8b6f4c-g5qht" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.727504 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9dc25628-882d-4ad4-a5ee-23e3b3d14abd-dns-svc\") pod \"dnsmasq-dns-75c8ddd69c-ttzj9\" (UID: \"9dc25628-882d-4ad4-a5ee-23e3b3d14abd\") " pod="openstack/dnsmasq-dns-75c8ddd69c-ttzj9" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.727547 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93df10b9-3ead-478c-9dd0-d7fbd3242ddb-config-data\") pod \"barbican-worker-778c8b6f4c-g5qht\" (UID: \"93df10b9-3ead-478c-9dd0-d7fbd3242ddb\") " pod="openstack/barbican-worker-778c8b6f4c-g5qht" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.728835 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9dc25628-882d-4ad4-a5ee-23e3b3d14abd-ovsdbserver-nb\") pod \"dnsmasq-dns-75c8ddd69c-ttzj9\" (UID: \"9dc25628-882d-4ad4-a5ee-23e3b3d14abd\") " pod="openstack/dnsmasq-dns-75c8ddd69c-ttzj9" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.728994 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-plq9j\" (UniqueName: \"kubernetes.io/projected/9dc25628-882d-4ad4-a5ee-23e3b3d14abd-kube-api-access-plq9j\") pod \"dnsmasq-dns-75c8ddd69c-ttzj9\" (UID: \"9dc25628-882d-4ad4-a5ee-23e3b3d14abd\") " pod="openstack/dnsmasq-dns-75c8ddd69c-ttzj9" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.729315 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9dc25628-882d-4ad4-a5ee-23e3b3d14abd-dns-swift-storage-0\") pod \"dnsmasq-dns-75c8ddd69c-ttzj9\" (UID: \"9dc25628-882d-4ad4-a5ee-23e3b3d14abd\") " pod="openstack/dnsmasq-dns-75c8ddd69c-ttzj9" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.729363 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9dc25628-882d-4ad4-a5ee-23e3b3d14abd-ovsdbserver-sb\") pod 
\"dnsmasq-dns-75c8ddd69c-ttzj9\" (UID: \"9dc25628-882d-4ad4-a5ee-23e3b3d14abd\") " pod="openstack/dnsmasq-dns-75c8ddd69c-ttzj9" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.730299 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93df10b9-3ead-478c-9dd0-d7fbd3242ddb-combined-ca-bundle\") pod \"barbican-worker-778c8b6f4c-g5qht\" (UID: \"93df10b9-3ead-478c-9dd0-d7fbd3242ddb\") " pod="openstack/barbican-worker-778c8b6f4c-g5qht" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.730516 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9vj2d\" (UniqueName: \"kubernetes.io/projected/93df10b9-3ead-478c-9dd0-d7fbd3242ddb-kube-api-access-9vj2d\") pod \"barbican-worker-778c8b6f4c-g5qht\" (UID: \"93df10b9-3ead-478c-9dd0-d7fbd3242ddb\") " pod="openstack/barbican-worker-778c8b6f4c-g5qht" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.730569 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9dc25628-882d-4ad4-a5ee-23e3b3d14abd-config\") pod \"dnsmasq-dns-75c8ddd69c-ttzj9\" (UID: \"9dc25628-882d-4ad4-a5ee-23e3b3d14abd\") " pod="openstack/dnsmasq-dns-75c8ddd69c-ttzj9" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.730646 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/93df10b9-3ead-478c-9dd0-d7fbd3242ddb-logs\") pod \"barbican-worker-778c8b6f4c-g5qht\" (UID: \"93df10b9-3ead-478c-9dd0-d7fbd3242ddb\") " pod="openstack/barbican-worker-778c8b6f4c-g5qht" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.731322 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/93df10b9-3ead-478c-9dd0-d7fbd3242ddb-logs\") pod \"barbican-worker-778c8b6f4c-g5qht\" (UID: \"93df10b9-3ead-478c-9dd0-d7fbd3242ddb\") " pod="openstack/barbican-worker-778c8b6f4c-g5qht" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.744135 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-778c8b6f4c-g5qht"] Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.756711 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93df10b9-3ead-478c-9dd0-d7fbd3242ddb-config-data\") pod \"barbican-worker-778c8b6f4c-g5qht\" (UID: \"93df10b9-3ead-478c-9dd0-d7fbd3242ddb\") " pod="openstack/barbican-worker-778c8b6f4c-g5qht" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.757399 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/93df10b9-3ead-478c-9dd0-d7fbd3242ddb-config-data-custom\") pod \"barbican-worker-778c8b6f4c-g5qht\" (UID: \"93df10b9-3ead-478c-9dd0-d7fbd3242ddb\") " pod="openstack/barbican-worker-778c8b6f4c-g5qht" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.767447 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9vj2d\" (UniqueName: \"kubernetes.io/projected/93df10b9-3ead-478c-9dd0-d7fbd3242ddb-kube-api-access-9vj2d\") pod \"barbican-worker-778c8b6f4c-g5qht\" (UID: \"93df10b9-3ead-478c-9dd0-d7fbd3242ddb\") " pod="openstack/barbican-worker-778c8b6f4c-g5qht" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.768681 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93df10b9-3ead-478c-9dd0-d7fbd3242ddb-combined-ca-bundle\") pod \"barbican-worker-778c8b6f4c-g5qht\" (UID: \"93df10b9-3ead-478c-9dd0-d7fbd3242ddb\") " pod="openstack/barbican-worker-778c8b6f4c-g5qht" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.816731 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8b5c85b87-vhjrk" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.826820 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-59b8c4c946-z72ns"] Feb 16 11:30:58 crc kubenswrapper[4949]: E0216 11:30:58.827380 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7eac8d6e-e079-41ea-83cb-d4e3a553db13" containerName="dnsmasq-dns" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.827396 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="7eac8d6e-e079-41ea-83cb-d4e3a553db13" containerName="dnsmasq-dns" Feb 16 11:30:58 crc kubenswrapper[4949]: E0216 11:30:58.827426 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7eac8d6e-e079-41ea-83cb-d4e3a553db13" containerName="init" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.827432 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="7eac8d6e-e079-41ea-83cb-d4e3a553db13" containerName="init" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.827625 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="7eac8d6e-e079-41ea-83cb-d4e3a553db13" containerName="dnsmasq-dns" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.828491 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-59b8c4c946-z72ns" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.832348 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.833234 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5-combined-ca-bundle\") pod \"barbican-keystone-listener-59d764b56d-xgwkg\" (UID: \"8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5\") " pod="openstack/barbican-keystone-listener-59d764b56d-xgwkg" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.834281 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.834535 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.834617 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.834781 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.836769 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-h4zml" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.838398 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5-config-data-custom\") pod \"barbican-keystone-listener-59d764b56d-xgwkg\" (UID: 
\"8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5\") " pod="openstack/barbican-keystone-listener-59d764b56d-xgwkg" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.838555 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5-config-data\") pod \"barbican-keystone-listener-59d764b56d-xgwkg\" (UID: \"8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5\") " pod="openstack/barbican-keystone-listener-59d764b56d-xgwkg" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.844441 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9dc25628-882d-4ad4-a5ee-23e3b3d14abd-dns-svc\") pod \"dnsmasq-dns-75c8ddd69c-ttzj9\" (UID: \"9dc25628-882d-4ad4-a5ee-23e3b3d14abd\") " pod="openstack/dnsmasq-dns-75c8ddd69c-ttzj9" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.844528 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9dc25628-882d-4ad4-a5ee-23e3b3d14abd-ovsdbserver-nb\") pod \"dnsmasq-dns-75c8ddd69c-ttzj9\" (UID: \"9dc25628-882d-4ad4-a5ee-23e3b3d14abd\") " pod="openstack/dnsmasq-dns-75c8ddd69c-ttzj9" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.844552 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5-logs\") pod \"barbican-keystone-listener-59d764b56d-xgwkg\" (UID: \"8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5\") " pod="openstack/barbican-keystone-listener-59d764b56d-xgwkg" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.844582 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-plq9j\" (UniqueName: \"kubernetes.io/projected/9dc25628-882d-4ad4-a5ee-23e3b3d14abd-kube-api-access-plq9j\") pod \"dnsmasq-dns-75c8ddd69c-ttzj9\" (UID: \"9dc25628-882d-4ad4-a5ee-23e3b3d14abd\") " pod="openstack/dnsmasq-dns-75c8ddd69c-ttzj9" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.844667 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4x9wd\" (UniqueName: \"kubernetes.io/projected/8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5-kube-api-access-4x9wd\") pod \"barbican-keystone-listener-59d764b56d-xgwkg\" (UID: \"8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5\") " pod="openstack/barbican-keystone-listener-59d764b56d-xgwkg" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.844748 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9dc25628-882d-4ad4-a5ee-23e3b3d14abd-dns-swift-storage-0\") pod \"dnsmasq-dns-75c8ddd69c-ttzj9\" (UID: \"9dc25628-882d-4ad4-a5ee-23e3b3d14abd\") " pod="openstack/dnsmasq-dns-75c8ddd69c-ttzj9" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.844785 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9dc25628-882d-4ad4-a5ee-23e3b3d14abd-ovsdbserver-sb\") pod \"dnsmasq-dns-75c8ddd69c-ttzj9\" (UID: \"9dc25628-882d-4ad4-a5ee-23e3b3d14abd\") " pod="openstack/dnsmasq-dns-75c8ddd69c-ttzj9" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.844998 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/9dc25628-882d-4ad4-a5ee-23e3b3d14abd-config\") pod \"dnsmasq-dns-75c8ddd69c-ttzj9\" (UID: \"9dc25628-882d-4ad4-a5ee-23e3b3d14abd\") " pod="openstack/dnsmasq-dns-75c8ddd69c-ttzj9" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.846262 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9dc25628-882d-4ad4-a5ee-23e3b3d14abd-config\") pod \"dnsmasq-dns-75c8ddd69c-ttzj9\" (UID: \"9dc25628-882d-4ad4-a5ee-23e3b3d14abd\") " pod="openstack/dnsmasq-dns-75c8ddd69c-ttzj9" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.850552 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9dc25628-882d-4ad4-a5ee-23e3b3d14abd-ovsdbserver-nb\") pod \"dnsmasq-dns-75c8ddd69c-ttzj9\" (UID: \"9dc25628-882d-4ad4-a5ee-23e3b3d14abd\") " pod="openstack/dnsmasq-dns-75c8ddd69c-ttzj9" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.850933 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9dc25628-882d-4ad4-a5ee-23e3b3d14abd-ovsdbserver-sb\") pod \"dnsmasq-dns-75c8ddd69c-ttzj9\" (UID: \"9dc25628-882d-4ad4-a5ee-23e3b3d14abd\") " pod="openstack/dnsmasq-dns-75c8ddd69c-ttzj9" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.851138 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9dc25628-882d-4ad4-a5ee-23e3b3d14abd-dns-swift-storage-0\") pod \"dnsmasq-dns-75c8ddd69c-ttzj9\" (UID: \"9dc25628-882d-4ad4-a5ee-23e3b3d14abd\") " pod="openstack/dnsmasq-dns-75c8ddd69c-ttzj9" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.853961 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9dc25628-882d-4ad4-a5ee-23e3b3d14abd-dns-svc\") pod \"dnsmasq-dns-75c8ddd69c-ttzj9\" (UID: \"9dc25628-882d-4ad4-a5ee-23e3b3d14abd\") " pod="openstack/dnsmasq-dns-75c8ddd69c-ttzj9" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.883187 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-59b8c4c946-z72ns"] Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.889073 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-plq9j\" (UniqueName: \"kubernetes.io/projected/9dc25628-882d-4ad4-a5ee-23e3b3d14abd-kube-api-access-plq9j\") pod \"dnsmasq-dns-75c8ddd69c-ttzj9\" (UID: \"9dc25628-882d-4ad4-a5ee-23e3b3d14abd\") " pod="openstack/dnsmasq-dns-75c8ddd69c-ttzj9" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.948075 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7eac8d6e-e079-41ea-83cb-d4e3a553db13-ovsdbserver-sb\") pod \"7eac8d6e-e079-41ea-83cb-d4e3a553db13\" (UID: \"7eac8d6e-e079-41ea-83cb-d4e3a553db13\") " Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.948270 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7eac8d6e-e079-41ea-83cb-d4e3a553db13-ovsdbserver-nb\") pod \"7eac8d6e-e079-41ea-83cb-d4e3a553db13\" (UID: \"7eac8d6e-e079-41ea-83cb-d4e3a553db13\") " Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.948367 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/7eac8d6e-e079-41ea-83cb-d4e3a553db13-dns-svc\") pod \"7eac8d6e-e079-41ea-83cb-d4e3a553db13\" (UID: \"7eac8d6e-e079-41ea-83cb-d4e3a553db13\") " Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.948451 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ncwxw\" (UniqueName: \"kubernetes.io/projected/7eac8d6e-e079-41ea-83cb-d4e3a553db13-kube-api-access-ncwxw\") pod \"7eac8d6e-e079-41ea-83cb-d4e3a553db13\" (UID: \"7eac8d6e-e079-41ea-83cb-d4e3a553db13\") " Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.948529 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7eac8d6e-e079-41ea-83cb-d4e3a553db13-dns-swift-storage-0\") pod \"7eac8d6e-e079-41ea-83cb-d4e3a553db13\" (UID: \"7eac8d6e-e079-41ea-83cb-d4e3a553db13\") " Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.948692 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7eac8d6e-e079-41ea-83cb-d4e3a553db13-config\") pod \"7eac8d6e-e079-41ea-83cb-d4e3a553db13\" (UID: \"7eac8d6e-e079-41ea-83cb-d4e3a553db13\") " Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.949274 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf7adfe6-84e8-4873-86b3-275c617e3917-config-data\") pod \"keystone-59b8c4c946-z72ns\" (UID: \"cf7adfe6-84e8-4873-86b3-275c617e3917\") " pod="openstack/keystone-59b8c4c946-z72ns" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.949715 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cf7adfe6-84e8-4873-86b3-275c617e3917-scripts\") pod \"keystone-59b8c4c946-z72ns\" (UID: \"cf7adfe6-84e8-4873-86b3-275c617e3917\") " pod="openstack/keystone-59b8c4c946-z72ns" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.949806 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5-combined-ca-bundle\") pod \"barbican-keystone-listener-59d764b56d-xgwkg\" (UID: \"8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5\") " pod="openstack/barbican-keystone-listener-59d764b56d-xgwkg" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.949915 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5-config-data-custom\") pod \"barbican-keystone-listener-59d764b56d-xgwkg\" (UID: \"8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5\") " pod="openstack/barbican-keystone-listener-59d764b56d-xgwkg" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.950031 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5-config-data\") pod \"barbican-keystone-listener-59d764b56d-xgwkg\" (UID: \"8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5\") " pod="openstack/barbican-keystone-listener-59d764b56d-xgwkg" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.957021 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/cf7adfe6-84e8-4873-86b3-275c617e3917-fernet-keys\") pod 
\"keystone-59b8c4c946-z72ns\" (UID: \"cf7adfe6-84e8-4873-86b3-275c617e3917\") " pod="openstack/keystone-59b8c4c946-z72ns" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.957273 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ndnpz\" (UniqueName: \"kubernetes.io/projected/cf7adfe6-84e8-4873-86b3-275c617e3917-kube-api-access-ndnpz\") pod \"keystone-59b8c4c946-z72ns\" (UID: \"cf7adfe6-84e8-4873-86b3-275c617e3917\") " pod="openstack/keystone-59b8c4c946-z72ns" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.957583 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/cf7adfe6-84e8-4873-86b3-275c617e3917-credential-keys\") pod \"keystone-59b8c4c946-z72ns\" (UID: \"cf7adfe6-84e8-4873-86b3-275c617e3917\") " pod="openstack/keystone-59b8c4c946-z72ns" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.957750 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5-logs\") pod \"barbican-keystone-listener-59d764b56d-xgwkg\" (UID: \"8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5\") " pod="openstack/barbican-keystone-listener-59d764b56d-xgwkg" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.957901 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4x9wd\" (UniqueName: \"kubernetes.io/projected/8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5-kube-api-access-4x9wd\") pod \"barbican-keystone-listener-59d764b56d-xgwkg\" (UID: \"8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5\") " pod="openstack/barbican-keystone-listener-59d764b56d-xgwkg" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.957989 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf7adfe6-84e8-4873-86b3-275c617e3917-internal-tls-certs\") pod \"keystone-59b8c4c946-z72ns\" (UID: \"cf7adfe6-84e8-4873-86b3-275c617e3917\") " pod="openstack/keystone-59b8c4c946-z72ns" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.960260 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf7adfe6-84e8-4873-86b3-275c617e3917-combined-ca-bundle\") pod \"keystone-59b8c4c946-z72ns\" (UID: \"cf7adfe6-84e8-4873-86b3-275c617e3917\") " pod="openstack/keystone-59b8c4c946-z72ns" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.962594 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf7adfe6-84e8-4873-86b3-275c617e3917-public-tls-certs\") pod \"keystone-59b8c4c946-z72ns\" (UID: \"cf7adfe6-84e8-4873-86b3-275c617e3917\") " pod="openstack/keystone-59b8c4c946-z72ns" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.964053 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5-logs\") pod \"barbican-keystone-listener-59d764b56d-xgwkg\" (UID: \"8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5\") " pod="openstack/barbican-keystone-listener-59d764b56d-xgwkg" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.960782 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/projected/7eac8d6e-e079-41ea-83cb-d4e3a553db13-kube-api-access-ncwxw" (OuterVolumeSpecName: "kube-api-access-ncwxw") pod "7eac8d6e-e079-41ea-83cb-d4e3a553db13" (UID: "7eac8d6e-e079-41ea-83cb-d4e3a553db13"). InnerVolumeSpecName "kube-api-access-ncwxw". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:30:58 crc kubenswrapper[4949]: I0216 11:30:58.966771 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5-combined-ca-bundle\") pod \"barbican-keystone-listener-59d764b56d-xgwkg\" (UID: \"8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5\") " pod="openstack/barbican-keystone-listener-59d764b56d-xgwkg" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.001458 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5-config-data-custom\") pod \"barbican-keystone-listener-59d764b56d-xgwkg\" (UID: \"8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5\") " pod="openstack/barbican-keystone-listener-59d764b56d-xgwkg" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.013863 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-778c8b6f4c-g5qht" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.013905 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-86cc5fd6cd-qvlrg"] Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.015945 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5-config-data\") pod \"barbican-keystone-listener-59d764b56d-xgwkg\" (UID: \"8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5\") " pod="openstack/barbican-keystone-listener-59d764b56d-xgwkg" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.016390 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-86cc5fd6cd-qvlrg" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.025099 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.028634 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4x9wd\" (UniqueName: \"kubernetes.io/projected/8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5-kube-api-access-4x9wd\") pod \"barbican-keystone-listener-59d764b56d-xgwkg\" (UID: \"8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5\") " pod="openstack/barbican-keystone-listener-59d764b56d-xgwkg" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.042411 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-86cc5fd6cd-qvlrg"] Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.066726 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/cf7adfe6-84e8-4873-86b3-275c617e3917-fernet-keys\") pod \"keystone-59b8c4c946-z72ns\" (UID: \"cf7adfe6-84e8-4873-86b3-275c617e3917\") " pod="openstack/keystone-59b8c4c946-z72ns" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.066776 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ndnpz\" (UniqueName: \"kubernetes.io/projected/cf7adfe6-84e8-4873-86b3-275c617e3917-kube-api-access-ndnpz\") pod \"keystone-59b8c4c946-z72ns\" (UID: \"cf7adfe6-84e8-4873-86b3-275c617e3917\") " pod="openstack/keystone-59b8c4c946-z72ns" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.066837 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/cf7adfe6-84e8-4873-86b3-275c617e3917-credential-keys\") pod \"keystone-59b8c4c946-z72ns\" (UID: \"cf7adfe6-84e8-4873-86b3-275c617e3917\") " pod="openstack/keystone-59b8c4c946-z72ns" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.066891 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf7adfe6-84e8-4873-86b3-275c617e3917-internal-tls-certs\") pod \"keystone-59b8c4c946-z72ns\" (UID: \"cf7adfe6-84e8-4873-86b3-275c617e3917\") " pod="openstack/keystone-59b8c4c946-z72ns" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.066940 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf7adfe6-84e8-4873-86b3-275c617e3917-combined-ca-bundle\") pod \"keystone-59b8c4c946-z72ns\" (UID: \"cf7adfe6-84e8-4873-86b3-275c617e3917\") " pod="openstack/keystone-59b8c4c946-z72ns" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.066962 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf7adfe6-84e8-4873-86b3-275c617e3917-public-tls-certs\") pod \"keystone-59b8c4c946-z72ns\" (UID: \"cf7adfe6-84e8-4873-86b3-275c617e3917\") " pod="openstack/keystone-59b8c4c946-z72ns" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.067002 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf7adfe6-84e8-4873-86b3-275c617e3917-config-data\") pod \"keystone-59b8c4c946-z72ns\" (UID: \"cf7adfe6-84e8-4873-86b3-275c617e3917\") " pod="openstack/keystone-59b8c4c946-z72ns" Feb 16 11:30:59 crc 
kubenswrapper[4949]: I0216 11:30:59.067032 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cf7adfe6-84e8-4873-86b3-275c617e3917-scripts\") pod \"keystone-59b8c4c946-z72ns\" (UID: \"cf7adfe6-84e8-4873-86b3-275c617e3917\") " pod="openstack/keystone-59b8c4c946-z72ns" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.067108 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ncwxw\" (UniqueName: \"kubernetes.io/projected/7eac8d6e-e079-41ea-83cb-d4e3a553db13-kube-api-access-ncwxw\") on node \"crc\" DevicePath \"\"" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.083726 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75c8ddd69c-ttzj9" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.097428 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8b5c85b87-vhjrk" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.098474 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b5c85b87-vhjrk" event={"ID":"7eac8d6e-e079-41ea-83cb-d4e3a553db13","Type":"ContainerDied","Data":"28f7cb3eac55faad405025d43b7dc7bb61b9c936903fd17b4ea1fd7490c44753"} Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.098531 4949 scope.go:117] "RemoveContainer" containerID="4755f11655c6804f93135f9e28d881f0998c90db8faf1f8da415f625b1fe096c" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.100808 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf7adfe6-84e8-4873-86b3-275c617e3917-internal-tls-certs\") pod \"keystone-59b8c4c946-z72ns\" (UID: \"cf7adfe6-84e8-4873-86b3-275c617e3917\") " pod="openstack/keystone-59b8c4c946-z72ns" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.103424 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-57d8c9758b-hz957" event={"ID":"74cdae97-ce6a-4653-a84d-6f46d9795fbb","Type":"ContainerStarted","Data":"30dd2127088aa2dec617aca580389796ed51e24350de342faadf463373e71c1e"} Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.103475 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-57d8c9758b-hz957" event={"ID":"74cdae97-ce6a-4653-a84d-6f46d9795fbb","Type":"ContainerStarted","Data":"61eea95e9c0cc8ca6196d4f1b0b70447afa54bc0a4be198777c23caacda7d1e4"} Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.103744 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-57d8c9758b-hz957" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.103823 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-57d8c9758b-hz957" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.105630 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cf7adfe6-84e8-4873-86b3-275c617e3917-scripts\") pod \"keystone-59b8c4c946-z72ns\" (UID: \"cf7adfe6-84e8-4873-86b3-275c617e3917\") " pod="openstack/keystone-59b8c4c946-z72ns" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.106046 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf7adfe6-84e8-4873-86b3-275c617e3917-config-data\") pod \"keystone-59b8c4c946-z72ns\" (UID: \"cf7adfe6-84e8-4873-86b3-275c617e3917\") " 
pod="openstack/keystone-59b8c4c946-z72ns" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.106119 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-6f5cdc9c5-rfrdp"] Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.108109 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-6f5cdc9c5-rfrdp" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.109988 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ndnpz\" (UniqueName: \"kubernetes.io/projected/cf7adfe6-84e8-4873-86b3-275c617e3917-kube-api-access-ndnpz\") pod \"keystone-59b8c4c946-z72ns\" (UID: \"cf7adfe6-84e8-4873-86b3-275c617e3917\") " pod="openstack/keystone-59b8c4c946-z72ns" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.111505 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/cf7adfe6-84e8-4873-86b3-275c617e3917-credential-keys\") pod \"keystone-59b8c4c946-z72ns\" (UID: \"cf7adfe6-84e8-4873-86b3-275c617e3917\") " pod="openstack/keystone-59b8c4c946-z72ns" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.112681 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf7adfe6-84e8-4873-86b3-275c617e3917-public-tls-certs\") pod \"keystone-59b8c4c946-z72ns\" (UID: \"cf7adfe6-84e8-4873-86b3-275c617e3917\") " pod="openstack/keystone-59b8c4c946-z72ns" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.121089 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-59d764b56d-xgwkg" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.123182 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf7adfe6-84e8-4873-86b3-275c617e3917-combined-ca-bundle\") pod \"keystone-59b8c4c946-z72ns\" (UID: \"cf7adfe6-84e8-4873-86b3-275c617e3917\") " pod="openstack/keystone-59b8c4c946-z72ns" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.123937 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/cf7adfe6-84e8-4873-86b3-275c617e3917-fernet-keys\") pod \"keystone-59b8c4c946-z72ns\" (UID: \"cf7adfe6-84e8-4873-86b3-275c617e3917\") " pod="openstack/keystone-59b8c4c946-z72ns" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.133279 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-64879558f4-wnmkq"] Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.135873 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-64879558f4-wnmkq" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.162754 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-59b8c4c946-z72ns" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.177869 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/52108945-9ec3-4a39-9f9b-e6a79ea4adc7-config-data-custom\") pod \"barbican-api-86cc5fd6cd-qvlrg\" (UID: \"52108945-9ec3-4a39-9f9b-e6a79ea4adc7\") " pod="openstack/barbican-api-86cc5fd6cd-qvlrg" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.178423 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6k489\" (UniqueName: \"kubernetes.io/projected/52108945-9ec3-4a39-9f9b-e6a79ea4adc7-kube-api-access-6k489\") pod \"barbican-api-86cc5fd6cd-qvlrg\" (UID: \"52108945-9ec3-4a39-9f9b-e6a79ea4adc7\") " pod="openstack/barbican-api-86cc5fd6cd-qvlrg" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.178510 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/52108945-9ec3-4a39-9f9b-e6a79ea4adc7-logs\") pod \"barbican-api-86cc5fd6cd-qvlrg\" (UID: \"52108945-9ec3-4a39-9f9b-e6a79ea4adc7\") " pod="openstack/barbican-api-86cc5fd6cd-qvlrg" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.178766 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52108945-9ec3-4a39-9f9b-e6a79ea4adc7-config-data\") pod \"barbican-api-86cc5fd6cd-qvlrg\" (UID: \"52108945-9ec3-4a39-9f9b-e6a79ea4adc7\") " pod="openstack/barbican-api-86cc5fd6cd-qvlrg" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.178838 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52108945-9ec3-4a39-9f9b-e6a79ea4adc7-combined-ca-bundle\") pod \"barbican-api-86cc5fd6cd-qvlrg\" (UID: \"52108945-9ec3-4a39-9f9b-e6a79ea4adc7\") " pod="openstack/barbican-api-86cc5fd6cd-qvlrg" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.195251 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-565968d4c6-rwgp4"] Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.198593 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-565968d4c6-rwgp4" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.203722 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-6f5cdc9c5-rfrdp"] Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.218693 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-565968d4c6-rwgp4"] Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.259849 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7eac8d6e-e079-41ea-83cb-d4e3a553db13-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "7eac8d6e-e079-41ea-83cb-d4e3a553db13" (UID: "7eac8d6e-e079-41ea-83cb-d4e3a553db13"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.289552 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m8wml\" (UniqueName: \"kubernetes.io/projected/52de476f-078d-48ff-a705-b647c492b187-kube-api-access-m8wml\") pod \"barbican-worker-6f5cdc9c5-rfrdp\" (UID: \"52de476f-078d-48ff-a705-b647c492b187\") " pod="openstack/barbican-worker-6f5cdc9c5-rfrdp" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.289716 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/942ae44c-f919-40ba-b0a0-eb112962e586-config-data\") pod \"barbican-keystone-listener-64879558f4-wnmkq\" (UID: \"942ae44c-f919-40ba-b0a0-eb112962e586\") " pod="openstack/barbican-keystone-listener-64879558f4-wnmkq" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.306344 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6k489\" (UniqueName: \"kubernetes.io/projected/52108945-9ec3-4a39-9f9b-e6a79ea4adc7-kube-api-access-6k489\") pod \"barbican-api-86cc5fd6cd-qvlrg\" (UID: \"52108945-9ec3-4a39-9f9b-e6a79ea4adc7\") " pod="openstack/barbican-api-86cc5fd6cd-qvlrg" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.306741 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/52108945-9ec3-4a39-9f9b-e6a79ea4adc7-logs\") pod \"barbican-api-86cc5fd6cd-qvlrg\" (UID: \"52108945-9ec3-4a39-9f9b-e6a79ea4adc7\") " pod="openstack/barbican-api-86cc5fd6cd-qvlrg" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.306864 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/942ae44c-f919-40ba-b0a0-eb112962e586-config-data-custom\") pod \"barbican-keystone-listener-64879558f4-wnmkq\" (UID: \"942ae44c-f919-40ba-b0a0-eb112962e586\") " pod="openstack/barbican-keystone-listener-64879558f4-wnmkq" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.307229 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/942ae44c-f919-40ba-b0a0-eb112962e586-combined-ca-bundle\") pod \"barbican-keystone-listener-64879558f4-wnmkq\" (UID: \"942ae44c-f919-40ba-b0a0-eb112962e586\") " pod="openstack/barbican-keystone-listener-64879558f4-wnmkq" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.307430 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52108945-9ec3-4a39-9f9b-e6a79ea4adc7-config-data\") pod \"barbican-api-86cc5fd6cd-qvlrg\" (UID: \"52108945-9ec3-4a39-9f9b-e6a79ea4adc7\") " pod="openstack/barbican-api-86cc5fd6cd-qvlrg" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.307591 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52108945-9ec3-4a39-9f9b-e6a79ea4adc7-combined-ca-bundle\") pod \"barbican-api-86cc5fd6cd-qvlrg\" (UID: \"52108945-9ec3-4a39-9f9b-e6a79ea4adc7\") " pod="openstack/barbican-api-86cc5fd6cd-qvlrg" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.307684 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/52de476f-078d-48ff-a705-b647c492b187-config-data\") pod \"barbican-worker-6f5cdc9c5-rfrdp\" (UID: \"52de476f-078d-48ff-a705-b647c492b187\") " pod="openstack/barbican-worker-6f5cdc9c5-rfrdp" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.307816 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/52108945-9ec3-4a39-9f9b-e6a79ea4adc7-config-data-custom\") pod \"barbican-api-86cc5fd6cd-qvlrg\" (UID: \"52108945-9ec3-4a39-9f9b-e6a79ea4adc7\") " pod="openstack/barbican-api-86cc5fd6cd-qvlrg" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.307977 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g64np\" (UniqueName: \"kubernetes.io/projected/942ae44c-f919-40ba-b0a0-eb112962e586-kube-api-access-g64np\") pod \"barbican-keystone-listener-64879558f4-wnmkq\" (UID: \"942ae44c-f919-40ba-b0a0-eb112962e586\") " pod="openstack/barbican-keystone-listener-64879558f4-wnmkq" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.308147 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52de476f-078d-48ff-a705-b647c492b187-combined-ca-bundle\") pod \"barbican-worker-6f5cdc9c5-rfrdp\" (UID: \"52de476f-078d-48ff-a705-b647c492b187\") " pod="openstack/barbican-worker-6f5cdc9c5-rfrdp" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.308279 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/942ae44c-f919-40ba-b0a0-eb112962e586-logs\") pod \"barbican-keystone-listener-64879558f4-wnmkq\" (UID: \"942ae44c-f919-40ba-b0a0-eb112962e586\") " pod="openstack/barbican-keystone-listener-64879558f4-wnmkq" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.308431 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/52de476f-078d-48ff-a705-b647c492b187-logs\") pod \"barbican-worker-6f5cdc9c5-rfrdp\" (UID: \"52de476f-078d-48ff-a705-b647c492b187\") " pod="openstack/barbican-worker-6f5cdc9c5-rfrdp" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.308556 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/52de476f-078d-48ff-a705-b647c492b187-config-data-custom\") pod \"barbican-worker-6f5cdc9c5-rfrdp\" (UID: \"52de476f-078d-48ff-a705-b647c492b187\") " pod="openstack/barbican-worker-6f5cdc9c5-rfrdp" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.308780 4949 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7eac8d6e-e079-41ea-83cb-d4e3a553db13-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.315419 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/52108945-9ec3-4a39-9f9b-e6a79ea4adc7-logs\") pod \"barbican-api-86cc5fd6cd-qvlrg\" (UID: \"52108945-9ec3-4a39-9f9b-e6a79ea4adc7\") " pod="openstack/barbican-api-86cc5fd6cd-qvlrg" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.337922 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-57d8c9758b-hz957" podStartSLOduration=7.337896386 
podStartE2EDuration="7.337896386s" podCreationTimestamp="2026-02-16 11:30:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:30:59.133620747 +0000 UTC m=+1448.762954902" watchObservedRunningTime="2026-02-16 11:30:59.337896386 +0000 UTC m=+1448.967230551" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.365824 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7eac8d6e-e079-41ea-83cb-d4e3a553db13-config" (OuterVolumeSpecName: "config") pod "7eac8d6e-e079-41ea-83cb-d4e3a553db13" (UID: "7eac8d6e-e079-41ea-83cb-d4e3a553db13"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.421093 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g64np\" (UniqueName: \"kubernetes.io/projected/942ae44c-f919-40ba-b0a0-eb112962e586-kube-api-access-g64np\") pod \"barbican-keystone-listener-64879558f4-wnmkq\" (UID: \"942ae44c-f919-40ba-b0a0-eb112962e586\") " pod="openstack/barbican-keystone-listener-64879558f4-wnmkq" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.421488 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52de476f-078d-48ff-a705-b647c492b187-combined-ca-bundle\") pod \"barbican-worker-6f5cdc9c5-rfrdp\" (UID: \"52de476f-078d-48ff-a705-b647c492b187\") " pod="openstack/barbican-worker-6f5cdc9c5-rfrdp" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.421521 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/942ae44c-f919-40ba-b0a0-eb112962e586-logs\") pod \"barbican-keystone-listener-64879558f4-wnmkq\" (UID: \"942ae44c-f919-40ba-b0a0-eb112962e586\") " pod="openstack/barbican-keystone-listener-64879558f4-wnmkq" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.421557 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/52de476f-078d-48ff-a705-b647c492b187-config-data-custom\") pod \"barbican-worker-6f5cdc9c5-rfrdp\" (UID: \"52de476f-078d-48ff-a705-b647c492b187\") " pod="openstack/barbican-worker-6f5cdc9c5-rfrdp" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.421574 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/52de476f-078d-48ff-a705-b647c492b187-logs\") pod \"barbican-worker-6f5cdc9c5-rfrdp\" (UID: \"52de476f-078d-48ff-a705-b647c492b187\") " pod="openstack/barbican-worker-6f5cdc9c5-rfrdp" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.421596 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m8wml\" (UniqueName: \"kubernetes.io/projected/52de476f-078d-48ff-a705-b647c492b187-kube-api-access-m8wml\") pod \"barbican-worker-6f5cdc9c5-rfrdp\" (UID: \"52de476f-078d-48ff-a705-b647c492b187\") " pod="openstack/barbican-worker-6f5cdc9c5-rfrdp" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.421617 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/aa9e92a7-29ce-4802-961f-3ab63430f40e-config-data-custom\") pod \"barbican-api-565968d4c6-rwgp4\" (UID: 
\"aa9e92a7-29ce-4802-961f-3ab63430f40e\") " pod="openstack/barbican-api-565968d4c6-rwgp4" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.421635 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/942ae44c-f919-40ba-b0a0-eb112962e586-config-data\") pod \"barbican-keystone-listener-64879558f4-wnmkq\" (UID: \"942ae44c-f919-40ba-b0a0-eb112962e586\") " pod="openstack/barbican-keystone-listener-64879558f4-wnmkq" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.421675 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa9e92a7-29ce-4802-961f-3ab63430f40e-combined-ca-bundle\") pod \"barbican-api-565968d4c6-rwgp4\" (UID: \"aa9e92a7-29ce-4802-961f-3ab63430f40e\") " pod="openstack/barbican-api-565968d4c6-rwgp4" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.425635 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/942ae44c-f919-40ba-b0a0-eb112962e586-logs\") pod \"barbican-keystone-listener-64879558f4-wnmkq\" (UID: \"942ae44c-f919-40ba-b0a0-eb112962e586\") " pod="openstack/barbican-keystone-listener-64879558f4-wnmkq" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.427189 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/52de476f-078d-48ff-a705-b647c492b187-logs\") pod \"barbican-worker-6f5cdc9c5-rfrdp\" (UID: \"52de476f-078d-48ff-a705-b647c492b187\") " pod="openstack/barbican-worker-6f5cdc9c5-rfrdp" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.439477 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7eac8d6e-e079-41ea-83cb-d4e3a553db13-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "7eac8d6e-e079-41ea-83cb-d4e3a553db13" (UID: "7eac8d6e-e079-41ea-83cb-d4e3a553db13"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.447272 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c476q\" (UniqueName: \"kubernetes.io/projected/aa9e92a7-29ce-4802-961f-3ab63430f40e-kube-api-access-c476q\") pod \"barbican-api-565968d4c6-rwgp4\" (UID: \"aa9e92a7-29ce-4802-961f-3ab63430f40e\") " pod="openstack/barbican-api-565968d4c6-rwgp4" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.447483 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/942ae44c-f919-40ba-b0a0-eb112962e586-config-data-custom\") pod \"barbican-keystone-listener-64879558f4-wnmkq\" (UID: \"942ae44c-f919-40ba-b0a0-eb112962e586\") " pod="openstack/barbican-keystone-listener-64879558f4-wnmkq" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.447600 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/aa9e92a7-29ce-4802-961f-3ab63430f40e-logs\") pod \"barbican-api-565968d4c6-rwgp4\" (UID: \"aa9e92a7-29ce-4802-961f-3ab63430f40e\") " pod="openstack/barbican-api-565968d4c6-rwgp4" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.447784 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/942ae44c-f919-40ba-b0a0-eb112962e586-combined-ca-bundle\") pod \"barbican-keystone-listener-64879558f4-wnmkq\" (UID: \"942ae44c-f919-40ba-b0a0-eb112962e586\") " pod="openstack/barbican-keystone-listener-64879558f4-wnmkq" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.447868 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aa9e92a7-29ce-4802-961f-3ab63430f40e-config-data\") pod \"barbican-api-565968d4c6-rwgp4\" (UID: \"aa9e92a7-29ce-4802-961f-3ab63430f40e\") " pod="openstack/barbican-api-565968d4c6-rwgp4" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.447986 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52de476f-078d-48ff-a705-b647c492b187-config-data\") pod \"barbican-worker-6f5cdc9c5-rfrdp\" (UID: \"52de476f-078d-48ff-a705-b647c492b187\") " pod="openstack/barbican-worker-6f5cdc9c5-rfrdp" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.448231 4949 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7eac8d6e-e079-41ea-83cb-d4e3a553db13-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.477459 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7eac8d6e-e079-41ea-83cb-d4e3a553db13-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "7eac8d6e-e079-41ea-83cb-d4e3a553db13" (UID: "7eac8d6e-e079-41ea-83cb-d4e3a553db13"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.496700 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52108945-9ec3-4a39-9f9b-e6a79ea4adc7-combined-ca-bundle\") pod \"barbican-api-86cc5fd6cd-qvlrg\" (UID: \"52108945-9ec3-4a39-9f9b-e6a79ea4adc7\") " pod="openstack/barbican-api-86cc5fd6cd-qvlrg" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.497637 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/52108945-9ec3-4a39-9f9b-e6a79ea4adc7-config-data-custom\") pod \"barbican-api-86cc5fd6cd-qvlrg\" (UID: \"52108945-9ec3-4a39-9f9b-e6a79ea4adc7\") " pod="openstack/barbican-api-86cc5fd6cd-qvlrg" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.506466 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52108945-9ec3-4a39-9f9b-e6a79ea4adc7-config-data\") pod \"barbican-api-86cc5fd6cd-qvlrg\" (UID: \"52108945-9ec3-4a39-9f9b-e6a79ea4adc7\") " pod="openstack/barbican-api-86cc5fd6cd-qvlrg" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.537572 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6k489\" (UniqueName: \"kubernetes.io/projected/52108945-9ec3-4a39-9f9b-e6a79ea4adc7-kube-api-access-6k489\") pod \"barbican-api-86cc5fd6cd-qvlrg\" (UID: \"52108945-9ec3-4a39-9f9b-e6a79ea4adc7\") " pod="openstack/barbican-api-86cc5fd6cd-qvlrg" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.538336 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/942ae44c-f919-40ba-b0a0-eb112962e586-combined-ca-bundle\") pod \"barbican-keystone-listener-64879558f4-wnmkq\" (UID: \"942ae44c-f919-40ba-b0a0-eb112962e586\") " pod="openstack/barbican-keystone-listener-64879558f4-wnmkq" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.541091 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52de476f-078d-48ff-a705-b647c492b187-combined-ca-bundle\") pod \"barbican-worker-6f5cdc9c5-rfrdp\" (UID: \"52de476f-078d-48ff-a705-b647c492b187\") " pod="openstack/barbican-worker-6f5cdc9c5-rfrdp" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.541623 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/942ae44c-f919-40ba-b0a0-eb112962e586-config-data-custom\") pod \"barbican-keystone-listener-64879558f4-wnmkq\" (UID: \"942ae44c-f919-40ba-b0a0-eb112962e586\") " pod="openstack/barbican-keystone-listener-64879558f4-wnmkq" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.541891 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/52de476f-078d-48ff-a705-b647c492b187-config-data-custom\") pod \"barbican-worker-6f5cdc9c5-rfrdp\" (UID: \"52de476f-078d-48ff-a705-b647c492b187\") " pod="openstack/barbican-worker-6f5cdc9c5-rfrdp" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.550079 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aa9e92a7-29ce-4802-961f-3ab63430f40e-config-data\") pod \"barbican-api-565968d4c6-rwgp4\" (UID: \"aa9e92a7-29ce-4802-961f-3ab63430f40e\") " 
pod="openstack/barbican-api-565968d4c6-rwgp4" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.550250 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/aa9e92a7-29ce-4802-961f-3ab63430f40e-config-data-custom\") pod \"barbican-api-565968d4c6-rwgp4\" (UID: \"aa9e92a7-29ce-4802-961f-3ab63430f40e\") " pod="openstack/barbican-api-565968d4c6-rwgp4" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.550291 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa9e92a7-29ce-4802-961f-3ab63430f40e-combined-ca-bundle\") pod \"barbican-api-565968d4c6-rwgp4\" (UID: \"aa9e92a7-29ce-4802-961f-3ab63430f40e\") " pod="openstack/barbican-api-565968d4c6-rwgp4" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.551521 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52de476f-078d-48ff-a705-b647c492b187-config-data\") pod \"barbican-worker-6f5cdc9c5-rfrdp\" (UID: \"52de476f-078d-48ff-a705-b647c492b187\") " pod="openstack/barbican-worker-6f5cdc9c5-rfrdp" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.553335 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m8wml\" (UniqueName: \"kubernetes.io/projected/52de476f-078d-48ff-a705-b647c492b187-kube-api-access-m8wml\") pod \"barbican-worker-6f5cdc9c5-rfrdp\" (UID: \"52de476f-078d-48ff-a705-b647c492b187\") " pod="openstack/barbican-worker-6f5cdc9c5-rfrdp" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.553570 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/942ae44c-f919-40ba-b0a0-eb112962e586-config-data\") pod \"barbican-keystone-listener-64879558f4-wnmkq\" (UID: \"942ae44c-f919-40ba-b0a0-eb112962e586\") " pod="openstack/barbican-keystone-listener-64879558f4-wnmkq" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.560347 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c476q\" (UniqueName: \"kubernetes.io/projected/aa9e92a7-29ce-4802-961f-3ab63430f40e-kube-api-access-c476q\") pod \"barbican-api-565968d4c6-rwgp4\" (UID: \"aa9e92a7-29ce-4802-961f-3ab63430f40e\") " pod="openstack/barbican-api-565968d4c6-rwgp4" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.560564 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/aa9e92a7-29ce-4802-961f-3ab63430f40e-logs\") pod \"barbican-api-565968d4c6-rwgp4\" (UID: \"aa9e92a7-29ce-4802-961f-3ab63430f40e\") " pod="openstack/barbican-api-565968d4c6-rwgp4" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.560848 4949 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7eac8d6e-e079-41ea-83cb-d4e3a553db13-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.560868 4949 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7eac8d6e-e079-41ea-83cb-d4e3a553db13-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.561270 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/aa9e92a7-29ce-4802-961f-3ab63430f40e-logs\") pod \"barbican-api-565968d4c6-rwgp4\" (UID: \"aa9e92a7-29ce-4802-961f-3ab63430f40e\") " pod="openstack/barbican-api-565968d4c6-rwgp4" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.561807 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g64np\" (UniqueName: \"kubernetes.io/projected/942ae44c-f919-40ba-b0a0-eb112962e586-kube-api-access-g64np\") pod \"barbican-keystone-listener-64879558f4-wnmkq\" (UID: \"942ae44c-f919-40ba-b0a0-eb112962e586\") " pod="openstack/barbican-keystone-listener-64879558f4-wnmkq" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.601871 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aa9e92a7-29ce-4802-961f-3ab63430f40e-config-data\") pod \"barbican-api-565968d4c6-rwgp4\" (UID: \"aa9e92a7-29ce-4802-961f-3ab63430f40e\") " pod="openstack/barbican-api-565968d4c6-rwgp4" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.617806 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7eac8d6e-e079-41ea-83cb-d4e3a553db13-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "7eac8d6e-e079-41ea-83cb-d4e3a553db13" (UID: "7eac8d6e-e079-41ea-83cb-d4e3a553db13"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.628903 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/aa9e92a7-29ce-4802-961f-3ab63430f40e-config-data-custom\") pod \"barbican-api-565968d4c6-rwgp4\" (UID: \"aa9e92a7-29ce-4802-961f-3ab63430f40e\") " pod="openstack/barbican-api-565968d4c6-rwgp4" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.634499 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c476q\" (UniqueName: \"kubernetes.io/projected/aa9e92a7-29ce-4802-961f-3ab63430f40e-kube-api-access-c476q\") pod \"barbican-api-565968d4c6-rwgp4\" (UID: \"aa9e92a7-29ce-4802-961f-3ab63430f40e\") " pod="openstack/barbican-api-565968d4c6-rwgp4" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.645774 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa9e92a7-29ce-4802-961f-3ab63430f40e-combined-ca-bundle\") pod \"barbican-api-565968d4c6-rwgp4\" (UID: \"aa9e92a7-29ce-4802-961f-3ab63430f40e\") " pod="openstack/barbican-api-565968d4c6-rwgp4" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.676941 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-64879558f4-wnmkq"] Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.677030 4949 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7eac8d6e-e079-41ea-83cb-d4e3a553db13-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.886004 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-86cc5fd6cd-qvlrg" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.887933 4949 scope.go:117] "RemoveContainer" containerID="e9fa1d9703742054003fda04f2977335dba3141aee9a450853223e312c93dbeb" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.911320 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-6f5cdc9c5-rfrdp" Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.935499 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8b5c85b87-vhjrk"] Feb 16 11:30:59 crc kubenswrapper[4949]: I0216 11:30:59.951162 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-64879558f4-wnmkq" Feb 16 11:31:00 crc kubenswrapper[4949]: I0216 11:31:00.001450 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8b5c85b87-vhjrk"] Feb 16 11:31:00 crc kubenswrapper[4949]: I0216 11:31:00.095990 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-565968d4c6-rwgp4" Feb 16 11:31:00 crc kubenswrapper[4949]: I0216 11:31:00.164750 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g76dr" event={"ID":"acc263a1-4f57-4dca-bcc5-5d5388539a5d","Type":"ContainerStarted","Data":"4ffb6c751bef37d00aac44d4842949bcbf1991d78f3befd1670ea34e72a639b9"} Feb 16 11:31:00 crc kubenswrapper[4949]: I0216 11:31:00.200639 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-g76dr" podStartSLOduration=18.254571079 podStartE2EDuration="29.20061746s" podCreationTimestamp="2026-02-16 11:30:31 +0000 UTC" firstStartedPulling="2026-02-16 11:30:47.673342129 +0000 UTC m=+1437.302676294" lastFinishedPulling="2026-02-16 11:30:58.61938851 +0000 UTC m=+1448.248722675" observedRunningTime="2026-02-16 11:31:00.181917967 +0000 UTC m=+1449.811252132" watchObservedRunningTime="2026-02-16 11:31:00.20061746 +0000 UTC m=+1449.829951625" Feb 16 11:31:00 crc kubenswrapper[4949]: I0216 11:31:00.286829 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-778c8b6f4c-g5qht"] Feb 16 11:31:00 crc kubenswrapper[4949]: W0216 11:31:00.300600 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod93df10b9_3ead_478c_9dd0_d7fbd3242ddb.slice/crio-71bd2467eab360d2ef498394c98059157dfee6721d05b142cdda3251aacd107d WatchSource:0}: Error finding container 71bd2467eab360d2ef498394c98059157dfee6721d05b142cdda3251aacd107d: Status 404 returned error can't find the container with id 71bd2467eab360d2ef498394c98059157dfee6721d05b142cdda3251aacd107d Feb 16 11:31:00 crc kubenswrapper[4949]: I0216 11:31:00.447798 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-59d764b56d-xgwkg"] Feb 16 11:31:00 crc kubenswrapper[4949]: I0216 11:31:00.614804 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Feb 16 11:31:00 crc kubenswrapper[4949]: I0216 11:31:00.615763 4949 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Feb 16 11:31:00 crc kubenswrapper[4949]: I0216 11:31:00.706877 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-59b8c4c946-z72ns"] Feb 16 11:31:00 crc kubenswrapper[4949]: I0216 11:31:00.709333 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Feb 16 11:31:00 crc kubenswrapper[4949]: I0216 11:31:00.709486 4949 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Feb 16 11:31:00 crc kubenswrapper[4949]: I0216 11:31:00.722978 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/dnsmasq-dns-75c8ddd69c-ttzj9"] Feb 16 11:31:00 crc kubenswrapper[4949]: I0216 11:31:00.873710 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Feb 16 11:31:01 crc kubenswrapper[4949]: I0216 11:31:01.135560 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Feb 16 11:31:01 crc kubenswrapper[4949]: I0216 11:31:01.314922 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7eac8d6e-e079-41ea-83cb-d4e3a553db13" path="/var/lib/kubelet/pods/7eac8d6e-e079-41ea-83cb-d4e3a553db13/volumes" Feb 16 11:31:01 crc kubenswrapper[4949]: I0216 11:31:01.316015 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-6f5cdc9c5-rfrdp"] Feb 16 11:31:01 crc kubenswrapper[4949]: I0216 11:31:01.316064 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-59b8c4c946-z72ns" event={"ID":"cf7adfe6-84e8-4873-86b3-275c617e3917","Type":"ContainerStarted","Data":"f9d4c922c87b4936e69a5bb30191191ae73511d684cadd3d4f26ea464d7e6805"} Feb 16 11:31:01 crc kubenswrapper[4949]: I0216 11:31:01.316095 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-59d764b56d-xgwkg" event={"ID":"8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5","Type":"ContainerStarted","Data":"a41f266cf3c717a0322e98934ea17654d3373796d0c54550b81435f4e6bdd676"} Feb 16 11:31:01 crc kubenswrapper[4949]: I0216 11:31:01.316119 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75c8ddd69c-ttzj9" event={"ID":"9dc25628-882d-4ad4-a5ee-23e3b3d14abd","Type":"ContainerStarted","Data":"71fe167f5801ab4214d333971415f3732f46ee074d859509535998c65aef0e39"} Feb 16 11:31:01 crc kubenswrapper[4949]: I0216 11:31:01.316137 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75c8ddd69c-ttzj9" event={"ID":"9dc25628-882d-4ad4-a5ee-23e3b3d14abd","Type":"ContainerStarted","Data":"e77841fe9f11feb562a3471a916ce5267dbaaebf6a541c77361575093761df75"} Feb 16 11:31:01 crc kubenswrapper[4949]: I0216 11:31:01.316151 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-778c8b6f4c-g5qht" event={"ID":"93df10b9-3ead-478c-9dd0-d7fbd3242ddb","Type":"ContainerStarted","Data":"71bd2467eab360d2ef498394c98059157dfee6721d05b142cdda3251aacd107d"} Feb 16 11:31:01 crc kubenswrapper[4949]: I0216 11:31:01.321664 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c59b957e-c5f8-463f-8228-1051225f5140","Type":"ContainerStarted","Data":"e12f9ac9cc1163e7fbc665fa1788a710a8045ef5a8e189c7103ed2fe5d1a1a79"} Feb 16 11:31:01 crc kubenswrapper[4949]: I0216 11:31:01.350124 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-565968d4c6-rwgp4"] Feb 16 11:31:01 crc kubenswrapper[4949]: I0216 11:31:01.436578 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-86cc5fd6cd-qvlrg"] Feb 16 11:31:01 crc kubenswrapper[4949]: I0216 11:31:01.506475 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-64879558f4-wnmkq"] Feb 16 11:31:01 crc kubenswrapper[4949]: I0216 11:31:01.840530 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-g76dr" Feb 16 11:31:01 crc kubenswrapper[4949]: I0216 11:31:01.841243 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-marketplace/redhat-operators-g76dr" Feb 16 11:31:02 crc kubenswrapper[4949]: I0216 11:31:02.382009 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-86cc5fd6cd-qvlrg" event={"ID":"52108945-9ec3-4a39-9f9b-e6a79ea4adc7","Type":"ContainerStarted","Data":"55a092b2cea8fc5e598040cf4e4f820a46c707bfb22ce7c716d47bc1eeae98d7"} Feb 16 11:31:02 crc kubenswrapper[4949]: I0216 11:31:02.382351 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-86cc5fd6cd-qvlrg" event={"ID":"52108945-9ec3-4a39-9f9b-e6a79ea4adc7","Type":"ContainerStarted","Data":"fd06c83eb197066eb5018f4aa8d20bab12b4fd67ff3c70af35b32b664c1bf679"} Feb 16 11:31:02 crc kubenswrapper[4949]: I0216 11:31:02.452542 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6f5cdc9c5-rfrdp" event={"ID":"52de476f-078d-48ff-a705-b647c492b187","Type":"ContainerStarted","Data":"87b4f4595f8dada08195862ae7cd2038a9830468d6006c82c845330fe8b90ca5"} Feb 16 11:31:02 crc kubenswrapper[4949]: I0216 11:31:02.456699 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-59b8c4c946-z72ns" event={"ID":"cf7adfe6-84e8-4873-86b3-275c617e3917","Type":"ContainerStarted","Data":"224feba58c3eccac2a559dc3c48caec67320a66872438a115e44cbdb8c39a62f"} Feb 16 11:31:02 crc kubenswrapper[4949]: I0216 11:31:02.457825 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-59b8c4c946-z72ns" Feb 16 11:31:02 crc kubenswrapper[4949]: I0216 11:31:02.481512 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-64879558f4-wnmkq" event={"ID":"942ae44c-f919-40ba-b0a0-eb112962e586","Type":"ContainerStarted","Data":"2dc863a6c7a86b71b396e91ff03d412de4e636fd095a2312fc0e8bc39d71c5c6"} Feb 16 11:31:02 crc kubenswrapper[4949]: I0216 11:31:02.506414 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-59b8c4c946-z72ns" podStartSLOduration=4.506385972 podStartE2EDuration="4.506385972s" podCreationTimestamp="2026-02-16 11:30:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:31:02.4807397 +0000 UTC m=+1452.110073865" watchObservedRunningTime="2026-02-16 11:31:02.506385972 +0000 UTC m=+1452.135720137" Feb 16 11:31:02 crc kubenswrapper[4949]: I0216 11:31:02.523414 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-565968d4c6-rwgp4" event={"ID":"aa9e92a7-29ce-4802-961f-3ab63430f40e","Type":"ContainerStarted","Data":"f162a4936bf79b205b68dacda2a34d160a023c93d18e1aab43678625d47abedb"} Feb 16 11:31:02 crc kubenswrapper[4949]: I0216 11:31:02.523466 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-565968d4c6-rwgp4" event={"ID":"aa9e92a7-29ce-4802-961f-3ab63430f40e","Type":"ContainerStarted","Data":"7ec9172359a39d02ffac4bf8b657bed4e084a3be6ae52d32b6d9588e163cf989"} Feb 16 11:31:02 crc kubenswrapper[4949]: I0216 11:31:02.539582 4949 generic.go:334] "Generic (PLEG): container finished" podID="9dc25628-882d-4ad4-a5ee-23e3b3d14abd" containerID="71fe167f5801ab4214d333971415f3732f46ee074d859509535998c65aef0e39" exitCode=0 Feb 16 11:31:02 crc kubenswrapper[4949]: I0216 11:31:02.539638 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75c8ddd69c-ttzj9" 
event={"ID":"9dc25628-882d-4ad4-a5ee-23e3b3d14abd","Type":"ContainerDied","Data":"71fe167f5801ab4214d333971415f3732f46ee074d859509535998c65aef0e39"} Feb 16 11:31:02 crc kubenswrapper[4949]: I0216 11:31:02.539666 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75c8ddd69c-ttzj9" event={"ID":"9dc25628-882d-4ad4-a5ee-23e3b3d14abd","Type":"ContainerStarted","Data":"1ae6065bda4a79260c5f6a78206b506b6d6b0860cbe831e505dfc3edf5993fa9"} Feb 16 11:31:02 crc kubenswrapper[4949]: I0216 11:31:02.541802 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-75c8ddd69c-ttzj9" Feb 16 11:31:02 crc kubenswrapper[4949]: I0216 11:31:02.567677 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-75c8ddd69c-ttzj9" podStartSLOduration=4.567660081 podStartE2EDuration="4.567660081s" podCreationTimestamp="2026-02-16 11:30:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:31:02.564961364 +0000 UTC m=+1452.194295549" watchObservedRunningTime="2026-02-16 11:31:02.567660081 +0000 UTC m=+1452.196994246" Feb 16 11:31:03 crc kubenswrapper[4949]: I0216 11:31:03.085036 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-g76dr" podUID="acc263a1-4f57-4dca-bcc5-5d5388539a5d" containerName="registry-server" probeResult="failure" output=< Feb 16 11:31:03 crc kubenswrapper[4949]: timeout: failed to connect service ":50051" within 1s Feb 16 11:31:03 crc kubenswrapper[4949]: > Feb 16 11:31:03 crc kubenswrapper[4949]: I0216 11:31:03.565686 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-565968d4c6-rwgp4"] Feb 16 11:31:03 crc kubenswrapper[4949]: I0216 11:31:03.638505 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-6dd8f56bbd-txvgd"] Feb 16 11:31:03 crc kubenswrapper[4949]: I0216 11:31:03.675348 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-6dd8f56bbd-txvgd"] Feb 16 11:31:03 crc kubenswrapper[4949]: I0216 11:31:03.675487 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-6dd8f56bbd-txvgd" Feb 16 11:31:03 crc kubenswrapper[4949]: I0216 11:31:03.691134 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Feb 16 11:31:03 crc kubenswrapper[4949]: I0216 11:31:03.691752 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Feb 16 11:31:03 crc kubenswrapper[4949]: I0216 11:31:03.714248 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-vh5z2" event={"ID":"4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f","Type":"ContainerStarted","Data":"b0a479d1b097c38dd3d6e4a671dd77bb73bff6f13bf66a1d12aacfdaded92d57"} Feb 16 11:31:03 crc kubenswrapper[4949]: I0216 11:31:03.768582 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-86cc5fd6cd-qvlrg" event={"ID":"52108945-9ec3-4a39-9f9b-e6a79ea4adc7","Type":"ContainerStarted","Data":"d89822835d4f34c6207defb6ae5a2a3e0ed5115d6ebc11b0a6557517faf6ef24"} Feb 16 11:31:03 crc kubenswrapper[4949]: I0216 11:31:03.769019 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-86cc5fd6cd-qvlrg" Feb 16 11:31:03 crc kubenswrapper[4949]: I0216 11:31:03.769415 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-86cc5fd6cd-qvlrg" Feb 16 11:31:03 crc kubenswrapper[4949]: I0216 11:31:03.811786 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-565968d4c6-rwgp4" event={"ID":"aa9e92a7-29ce-4802-961f-3ab63430f40e","Type":"ContainerStarted","Data":"d6519a8d6f84619380f8b0534c178f794054d2ef1273cebd5adb065f2583519a"} Feb 16 11:31:03 crc kubenswrapper[4949]: I0216 11:31:03.811974 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-565968d4c6-rwgp4" Feb 16 11:31:03 crc kubenswrapper[4949]: I0216 11:31:03.812099 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-565968d4c6-rwgp4" Feb 16 11:31:03 crc kubenswrapper[4949]: I0216 11:31:03.815162 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1a5a86a0-f27c-4c48-a9fb-2dfdca066751-internal-tls-certs\") pod \"barbican-api-6dd8f56bbd-txvgd\" (UID: \"1a5a86a0-f27c-4c48-a9fb-2dfdca066751\") " pod="openstack/barbican-api-6dd8f56bbd-txvgd" Feb 16 11:31:03 crc kubenswrapper[4949]: I0216 11:31:03.815228 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1a5a86a0-f27c-4c48-a9fb-2dfdca066751-config-data-custom\") pod \"barbican-api-6dd8f56bbd-txvgd\" (UID: \"1a5a86a0-f27c-4c48-a9fb-2dfdca066751\") " pod="openstack/barbican-api-6dd8f56bbd-txvgd" Feb 16 11:31:03 crc kubenswrapper[4949]: I0216 11:31:03.815309 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7qrlx\" (UniqueName: \"kubernetes.io/projected/1a5a86a0-f27c-4c48-a9fb-2dfdca066751-kube-api-access-7qrlx\") pod \"barbican-api-6dd8f56bbd-txvgd\" (UID: \"1a5a86a0-f27c-4c48-a9fb-2dfdca066751\") " pod="openstack/barbican-api-6dd8f56bbd-txvgd" Feb 16 11:31:03 crc kubenswrapper[4949]: I0216 11:31:03.815347 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-vh5z2" podStartSLOduration=5.322132369 
podStartE2EDuration="46.815317901s" podCreationTimestamp="2026-02-16 11:30:17 +0000 UTC" firstStartedPulling="2026-02-16 11:30:19.295837823 +0000 UTC m=+1408.925171988" lastFinishedPulling="2026-02-16 11:31:00.789023355 +0000 UTC m=+1450.418357520" observedRunningTime="2026-02-16 11:31:03.773709653 +0000 UTC m=+1453.403043818" watchObservedRunningTime="2026-02-16 11:31:03.815317901 +0000 UTC m=+1453.444652056" Feb 16 11:31:03 crc kubenswrapper[4949]: I0216 11:31:03.815500 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a5a86a0-f27c-4c48-a9fb-2dfdca066751-combined-ca-bundle\") pod \"barbican-api-6dd8f56bbd-txvgd\" (UID: \"1a5a86a0-f27c-4c48-a9fb-2dfdca066751\") " pod="openstack/barbican-api-6dd8f56bbd-txvgd" Feb 16 11:31:03 crc kubenswrapper[4949]: I0216 11:31:03.818800 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1a5a86a0-f27c-4c48-a9fb-2dfdca066751-public-tls-certs\") pod \"barbican-api-6dd8f56bbd-txvgd\" (UID: \"1a5a86a0-f27c-4c48-a9fb-2dfdca066751\") " pod="openstack/barbican-api-6dd8f56bbd-txvgd" Feb 16 11:31:03 crc kubenswrapper[4949]: I0216 11:31:03.819737 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1a5a86a0-f27c-4c48-a9fb-2dfdca066751-logs\") pod \"barbican-api-6dd8f56bbd-txvgd\" (UID: \"1a5a86a0-f27c-4c48-a9fb-2dfdca066751\") " pod="openstack/barbican-api-6dd8f56bbd-txvgd" Feb 16 11:31:03 crc kubenswrapper[4949]: I0216 11:31:03.819826 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1a5a86a0-f27c-4c48-a9fb-2dfdca066751-config-data\") pod \"barbican-api-6dd8f56bbd-txvgd\" (UID: \"1a5a86a0-f27c-4c48-a9fb-2dfdca066751\") " pod="openstack/barbican-api-6dd8f56bbd-txvgd" Feb 16 11:31:03 crc kubenswrapper[4949]: I0216 11:31:03.854140 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-86cc5fd6cd-qvlrg" podStartSLOduration=5.854115088 podStartE2EDuration="5.854115088s" podCreationTimestamp="2026-02-16 11:30:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:31:03.821521048 +0000 UTC m=+1453.450855213" watchObservedRunningTime="2026-02-16 11:31:03.854115088 +0000 UTC m=+1453.483449243" Feb 16 11:31:03 crc kubenswrapper[4949]: I0216 11:31:03.874305 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-565968d4c6-rwgp4" podStartSLOduration=5.874282884 podStartE2EDuration="5.874282884s" podCreationTimestamp="2026-02-16 11:30:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:31:03.856754693 +0000 UTC m=+1453.486088848" watchObservedRunningTime="2026-02-16 11:31:03.874282884 +0000 UTC m=+1453.503617039" Feb 16 11:31:03 crc kubenswrapper[4949]: I0216 11:31:03.921613 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1a5a86a0-f27c-4c48-a9fb-2dfdca066751-public-tls-certs\") pod \"barbican-api-6dd8f56bbd-txvgd\" (UID: \"1a5a86a0-f27c-4c48-a9fb-2dfdca066751\") " 
pod="openstack/barbican-api-6dd8f56bbd-txvgd" Feb 16 11:31:03 crc kubenswrapper[4949]: I0216 11:31:03.921680 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1a5a86a0-f27c-4c48-a9fb-2dfdca066751-logs\") pod \"barbican-api-6dd8f56bbd-txvgd\" (UID: \"1a5a86a0-f27c-4c48-a9fb-2dfdca066751\") " pod="openstack/barbican-api-6dd8f56bbd-txvgd" Feb 16 11:31:03 crc kubenswrapper[4949]: I0216 11:31:03.921744 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1a5a86a0-f27c-4c48-a9fb-2dfdca066751-config-data\") pod \"barbican-api-6dd8f56bbd-txvgd\" (UID: \"1a5a86a0-f27c-4c48-a9fb-2dfdca066751\") " pod="openstack/barbican-api-6dd8f56bbd-txvgd" Feb 16 11:31:03 crc kubenswrapper[4949]: I0216 11:31:03.921794 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1a5a86a0-f27c-4c48-a9fb-2dfdca066751-internal-tls-certs\") pod \"barbican-api-6dd8f56bbd-txvgd\" (UID: \"1a5a86a0-f27c-4c48-a9fb-2dfdca066751\") " pod="openstack/barbican-api-6dd8f56bbd-txvgd" Feb 16 11:31:03 crc kubenswrapper[4949]: I0216 11:31:03.921839 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1a5a86a0-f27c-4c48-a9fb-2dfdca066751-config-data-custom\") pod \"barbican-api-6dd8f56bbd-txvgd\" (UID: \"1a5a86a0-f27c-4c48-a9fb-2dfdca066751\") " pod="openstack/barbican-api-6dd8f56bbd-txvgd" Feb 16 11:31:03 crc kubenswrapper[4949]: I0216 11:31:03.921940 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7qrlx\" (UniqueName: \"kubernetes.io/projected/1a5a86a0-f27c-4c48-a9fb-2dfdca066751-kube-api-access-7qrlx\") pod \"barbican-api-6dd8f56bbd-txvgd\" (UID: \"1a5a86a0-f27c-4c48-a9fb-2dfdca066751\") " pod="openstack/barbican-api-6dd8f56bbd-txvgd" Feb 16 11:31:03 crc kubenswrapper[4949]: I0216 11:31:03.922222 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a5a86a0-f27c-4c48-a9fb-2dfdca066751-combined-ca-bundle\") pod \"barbican-api-6dd8f56bbd-txvgd\" (UID: \"1a5a86a0-f27c-4c48-a9fb-2dfdca066751\") " pod="openstack/barbican-api-6dd8f56bbd-txvgd" Feb 16 11:31:03 crc kubenswrapper[4949]: I0216 11:31:03.924170 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1a5a86a0-f27c-4c48-a9fb-2dfdca066751-logs\") pod \"barbican-api-6dd8f56bbd-txvgd\" (UID: \"1a5a86a0-f27c-4c48-a9fb-2dfdca066751\") " pod="openstack/barbican-api-6dd8f56bbd-txvgd" Feb 16 11:31:03 crc kubenswrapper[4949]: I0216 11:31:03.929493 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a5a86a0-f27c-4c48-a9fb-2dfdca066751-combined-ca-bundle\") pod \"barbican-api-6dd8f56bbd-txvgd\" (UID: \"1a5a86a0-f27c-4c48-a9fb-2dfdca066751\") " pod="openstack/barbican-api-6dd8f56bbd-txvgd" Feb 16 11:31:03 crc kubenswrapper[4949]: I0216 11:31:03.937693 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1a5a86a0-f27c-4c48-a9fb-2dfdca066751-config-data\") pod \"barbican-api-6dd8f56bbd-txvgd\" (UID: \"1a5a86a0-f27c-4c48-a9fb-2dfdca066751\") " pod="openstack/barbican-api-6dd8f56bbd-txvgd" Feb 16 11:31:03 crc 
kubenswrapper[4949]: I0216 11:31:03.942467 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1a5a86a0-f27c-4c48-a9fb-2dfdca066751-internal-tls-certs\") pod \"barbican-api-6dd8f56bbd-txvgd\" (UID: \"1a5a86a0-f27c-4c48-a9fb-2dfdca066751\") " pod="openstack/barbican-api-6dd8f56bbd-txvgd" Feb 16 11:31:03 crc kubenswrapper[4949]: I0216 11:31:03.944849 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1a5a86a0-f27c-4c48-a9fb-2dfdca066751-public-tls-certs\") pod \"barbican-api-6dd8f56bbd-txvgd\" (UID: \"1a5a86a0-f27c-4c48-a9fb-2dfdca066751\") " pod="openstack/barbican-api-6dd8f56bbd-txvgd" Feb 16 11:31:03 crc kubenswrapper[4949]: I0216 11:31:03.945020 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1a5a86a0-f27c-4c48-a9fb-2dfdca066751-config-data-custom\") pod \"barbican-api-6dd8f56bbd-txvgd\" (UID: \"1a5a86a0-f27c-4c48-a9fb-2dfdca066751\") " pod="openstack/barbican-api-6dd8f56bbd-txvgd" Feb 16 11:31:03 crc kubenswrapper[4949]: I0216 11:31:03.956263 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7qrlx\" (UniqueName: \"kubernetes.io/projected/1a5a86a0-f27c-4c48-a9fb-2dfdca066751-kube-api-access-7qrlx\") pod \"barbican-api-6dd8f56bbd-txvgd\" (UID: \"1a5a86a0-f27c-4c48-a9fb-2dfdca066751\") " pod="openstack/barbican-api-6dd8f56bbd-txvgd" Feb 16 11:31:04 crc kubenswrapper[4949]: I0216 11:31:04.027577 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-6dd8f56bbd-txvgd" Feb 16 11:31:04 crc kubenswrapper[4949]: I0216 11:31:04.820419 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-565968d4c6-rwgp4" podUID="aa9e92a7-29ce-4802-961f-3ab63430f40e" containerName="barbican-api-log" containerID="cri-o://f162a4936bf79b205b68dacda2a34d160a023c93d18e1aab43678625d47abedb" gracePeriod=30 Feb 16 11:31:04 crc kubenswrapper[4949]: I0216 11:31:04.821428 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-565968d4c6-rwgp4" podUID="aa9e92a7-29ce-4802-961f-3ab63430f40e" containerName="barbican-api" containerID="cri-o://d6519a8d6f84619380f8b0534c178f794054d2ef1273cebd5adb065f2583519a" gracePeriod=30 Feb 16 11:31:05 crc kubenswrapper[4949]: I0216 11:31:05.861572 4949 generic.go:334] "Generic (PLEG): container finished" podID="aa9e92a7-29ce-4802-961f-3ab63430f40e" containerID="d6519a8d6f84619380f8b0534c178f794054d2ef1273cebd5adb065f2583519a" exitCode=0 Feb 16 11:31:05 crc kubenswrapper[4949]: I0216 11:31:05.862117 4949 generic.go:334] "Generic (PLEG): container finished" podID="aa9e92a7-29ce-4802-961f-3ab63430f40e" containerID="f162a4936bf79b205b68dacda2a34d160a023c93d18e1aab43678625d47abedb" exitCode=143 Feb 16 11:31:05 crc kubenswrapper[4949]: I0216 11:31:05.861754 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-565968d4c6-rwgp4" event={"ID":"aa9e92a7-29ce-4802-961f-3ab63430f40e","Type":"ContainerDied","Data":"d6519a8d6f84619380f8b0534c178f794054d2ef1273cebd5adb065f2583519a"} Feb 16 11:31:05 crc kubenswrapper[4949]: I0216 11:31:05.862165 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-565968d4c6-rwgp4" 
event={"ID":"aa9e92a7-29ce-4802-961f-3ab63430f40e","Type":"ContainerDied","Data":"f162a4936bf79b205b68dacda2a34d160a023c93d18e1aab43678625d47abedb"} Feb 16 11:31:06 crc kubenswrapper[4949]: I0216 11:31:06.604502 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-565968d4c6-rwgp4" Feb 16 11:31:06 crc kubenswrapper[4949]: I0216 11:31:06.779961 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa9e92a7-29ce-4802-961f-3ab63430f40e-combined-ca-bundle\") pod \"aa9e92a7-29ce-4802-961f-3ab63430f40e\" (UID: \"aa9e92a7-29ce-4802-961f-3ab63430f40e\") " Feb 16 11:31:06 crc kubenswrapper[4949]: I0216 11:31:06.780348 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/aa9e92a7-29ce-4802-961f-3ab63430f40e-logs\") pod \"aa9e92a7-29ce-4802-961f-3ab63430f40e\" (UID: \"aa9e92a7-29ce-4802-961f-3ab63430f40e\") " Feb 16 11:31:06 crc kubenswrapper[4949]: I0216 11:31:06.780602 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c476q\" (UniqueName: \"kubernetes.io/projected/aa9e92a7-29ce-4802-961f-3ab63430f40e-kube-api-access-c476q\") pod \"aa9e92a7-29ce-4802-961f-3ab63430f40e\" (UID: \"aa9e92a7-29ce-4802-961f-3ab63430f40e\") " Feb 16 11:31:06 crc kubenswrapper[4949]: I0216 11:31:06.780638 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aa9e92a7-29ce-4802-961f-3ab63430f40e-config-data\") pod \"aa9e92a7-29ce-4802-961f-3ab63430f40e\" (UID: \"aa9e92a7-29ce-4802-961f-3ab63430f40e\") " Feb 16 11:31:06 crc kubenswrapper[4949]: I0216 11:31:06.780674 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/aa9e92a7-29ce-4802-961f-3ab63430f40e-config-data-custom\") pod \"aa9e92a7-29ce-4802-961f-3ab63430f40e\" (UID: \"aa9e92a7-29ce-4802-961f-3ab63430f40e\") " Feb 16 11:31:06 crc kubenswrapper[4949]: I0216 11:31:06.781275 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aa9e92a7-29ce-4802-961f-3ab63430f40e-logs" (OuterVolumeSpecName: "logs") pod "aa9e92a7-29ce-4802-961f-3ab63430f40e" (UID: "aa9e92a7-29ce-4802-961f-3ab63430f40e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:31:06 crc kubenswrapper[4949]: I0216 11:31:06.782444 4949 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/aa9e92a7-29ce-4802-961f-3ab63430f40e-logs\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:06 crc kubenswrapper[4949]: I0216 11:31:06.786826 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aa9e92a7-29ce-4802-961f-3ab63430f40e-kube-api-access-c476q" (OuterVolumeSpecName: "kube-api-access-c476q") pod "aa9e92a7-29ce-4802-961f-3ab63430f40e" (UID: "aa9e92a7-29ce-4802-961f-3ab63430f40e"). InnerVolumeSpecName "kube-api-access-c476q". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:31:06 crc kubenswrapper[4949]: I0216 11:31:06.790447 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa9e92a7-29ce-4802-961f-3ab63430f40e-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "aa9e92a7-29ce-4802-961f-3ab63430f40e" (UID: "aa9e92a7-29ce-4802-961f-3ab63430f40e"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:31:06 crc kubenswrapper[4949]: I0216 11:31:06.833663 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa9e92a7-29ce-4802-961f-3ab63430f40e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "aa9e92a7-29ce-4802-961f-3ab63430f40e" (UID: "aa9e92a7-29ce-4802-961f-3ab63430f40e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:31:06 crc kubenswrapper[4949]: I0216 11:31:06.888253 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c476q\" (UniqueName: \"kubernetes.io/projected/aa9e92a7-29ce-4802-961f-3ab63430f40e-kube-api-access-c476q\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:06 crc kubenswrapper[4949]: I0216 11:31:06.888286 4949 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/aa9e92a7-29ce-4802-961f-3ab63430f40e-config-data-custom\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:06 crc kubenswrapper[4949]: I0216 11:31:06.888298 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa9e92a7-29ce-4802-961f-3ab63430f40e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:06 crc kubenswrapper[4949]: I0216 11:31:06.908123 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-565968d4c6-rwgp4" event={"ID":"aa9e92a7-29ce-4802-961f-3ab63430f40e","Type":"ContainerDied","Data":"7ec9172359a39d02ffac4bf8b657bed4e084a3be6ae52d32b6d9588e163cf989"} Feb 16 11:31:06 crc kubenswrapper[4949]: I0216 11:31:06.908432 4949 scope.go:117] "RemoveContainer" containerID="d6519a8d6f84619380f8b0534c178f794054d2ef1273cebd5adb065f2583519a" Feb 16 11:31:06 crc kubenswrapper[4949]: I0216 11:31:06.908764 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-565968d4c6-rwgp4" Feb 16 11:31:06 crc kubenswrapper[4949]: I0216 11:31:06.953704 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa9e92a7-29ce-4802-961f-3ab63430f40e-config-data" (OuterVolumeSpecName: "config-data") pod "aa9e92a7-29ce-4802-961f-3ab63430f40e" (UID: "aa9e92a7-29ce-4802-961f-3ab63430f40e"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:31:06 crc kubenswrapper[4949]: I0216 11:31:06.981594 4949 scope.go:117] "RemoveContainer" containerID="f162a4936bf79b205b68dacda2a34d160a023c93d18e1aab43678625d47abedb" Feb 16 11:31:06 crc kubenswrapper[4949]: I0216 11:31:06.990334 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aa9e92a7-29ce-4802-961f-3ab63430f40e-config-data\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:07 crc kubenswrapper[4949]: I0216 11:31:07.021509 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-6dd8f56bbd-txvgd"] Feb 16 11:31:07 crc kubenswrapper[4949]: I0216 11:31:07.296377 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-565968d4c6-rwgp4"] Feb 16 11:31:07 crc kubenswrapper[4949]: I0216 11:31:07.320407 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-565968d4c6-rwgp4"] Feb 16 11:31:07 crc kubenswrapper[4949]: I0216 11:31:07.933022 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-778c8b6f4c-g5qht" event={"ID":"93df10b9-3ead-478c-9dd0-d7fbd3242ddb","Type":"ContainerStarted","Data":"ac8a17d19f086f2d7cae416be2d1b025d2c92c16a5ceea66e7b15e74dfe8955d"} Feb 16 11:31:07 crc kubenswrapper[4949]: I0216 11:31:07.940988 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6f5cdc9c5-rfrdp" event={"ID":"52de476f-078d-48ff-a705-b647c492b187","Type":"ContainerStarted","Data":"80c98321183d03a64bbbf8a6f2883502e2dbe6805a161c2766945e61cf2e9a8d"} Feb 16 11:31:07 crc kubenswrapper[4949]: I0216 11:31:07.941103 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6f5cdc9c5-rfrdp" event={"ID":"52de476f-078d-48ff-a705-b647c492b187","Type":"ContainerStarted","Data":"9d6736fef3bb17d4856121a5b2a296ca0d11c138cd3a95e8e727f499d002979e"} Feb 16 11:31:07 crc kubenswrapper[4949]: I0216 11:31:07.969567 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-64879558f4-wnmkq" event={"ID":"942ae44c-f919-40ba-b0a0-eb112962e586","Type":"ContainerStarted","Data":"3050c57da4615addbb7d50229054c86fa5367e7e6acc19c575551e91bdf73392"} Feb 16 11:31:07 crc kubenswrapper[4949]: I0216 11:31:07.969623 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-64879558f4-wnmkq" event={"ID":"942ae44c-f919-40ba-b0a0-eb112962e586","Type":"ContainerStarted","Data":"4c6076d2377df7f9b5b33c9694252de204d94060ab4613103958a5f9d2c6530e"} Feb 16 11:31:07 crc kubenswrapper[4949]: I0216 11:31:07.973280 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-6f5cdc9c5-rfrdp" podStartSLOduration=4.807053683 podStartE2EDuration="9.9730459s" podCreationTimestamp="2026-02-16 11:30:58 +0000 UTC" firstStartedPulling="2026-02-16 11:31:01.361742061 +0000 UTC m=+1450.991076226" lastFinishedPulling="2026-02-16 11:31:06.527734278 +0000 UTC m=+1456.157068443" observedRunningTime="2026-02-16 11:31:07.970429855 +0000 UTC m=+1457.599764040" watchObservedRunningTime="2026-02-16 11:31:07.9730459 +0000 UTC m=+1457.602380065" Feb 16 11:31:07 crc kubenswrapper[4949]: I0216 11:31:07.986839 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-59d764b56d-xgwkg" 
event={"ID":"8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5","Type":"ContainerStarted","Data":"787115588af80eca11d98588f198014f1337e7320b4940c5371cd5cca6c98580"} Feb 16 11:31:07 crc kubenswrapper[4949]: I0216 11:31:07.987264 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-59d764b56d-xgwkg" event={"ID":"8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5","Type":"ContainerStarted","Data":"3b87fce00b2c7c023e7f41d779803404b3a9c8cd34ed274a57736eb479c3c072"} Feb 16 11:31:08 crc kubenswrapper[4949]: I0216 11:31:08.017475 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-64879558f4-wnmkq" podStartSLOduration=4.9502176989999995 podStartE2EDuration="10.017447767s" podCreationTimestamp="2026-02-16 11:30:58 +0000 UTC" firstStartedPulling="2026-02-16 11:31:01.44965277 +0000 UTC m=+1451.078986935" lastFinishedPulling="2026-02-16 11:31:06.516882838 +0000 UTC m=+1456.146217003" observedRunningTime="2026-02-16 11:31:07.993696689 +0000 UTC m=+1457.623030854" watchObservedRunningTime="2026-02-16 11:31:08.017447767 +0000 UTC m=+1457.646781932" Feb 16 11:31:08 crc kubenswrapper[4949]: I0216 11:31:08.032533 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6dd8f56bbd-txvgd" event={"ID":"1a5a86a0-f27c-4c48-a9fb-2dfdca066751","Type":"ContainerStarted","Data":"10083c9eefa549ebc0a8d730616b5feff259a2e54f6248744fe2fbb4641f319e"} Feb 16 11:31:08 crc kubenswrapper[4949]: I0216 11:31:08.032595 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6dd8f56bbd-txvgd" event={"ID":"1a5a86a0-f27c-4c48-a9fb-2dfdca066751","Type":"ContainerStarted","Data":"a31aecf40df8f2b9d7d62556a3967e89b04643a46f1f96d782a476abef961eff"} Feb 16 11:31:08 crc kubenswrapper[4949]: I0216 11:31:08.033098 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-6dd8f56bbd-txvgd" Feb 16 11:31:08 crc kubenswrapper[4949]: I0216 11:31:08.034089 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-6dd8f56bbd-txvgd" Feb 16 11:31:08 crc kubenswrapper[4949]: I0216 11:31:08.073319 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-778c8b6f4c-g5qht"] Feb 16 11:31:08 crc kubenswrapper[4949]: I0216 11:31:08.104460 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-59d764b56d-xgwkg"] Feb 16 11:31:08 crc kubenswrapper[4949]: I0216 11:31:08.109271 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-59d764b56d-xgwkg" podStartSLOduration=4.126498059 podStartE2EDuration="10.109239697s" podCreationTimestamp="2026-02-16 11:30:58 +0000 UTC" firstStartedPulling="2026-02-16 11:31:00.475549167 +0000 UTC m=+1450.104883332" lastFinishedPulling="2026-02-16 11:31:06.458290805 +0000 UTC m=+1456.087624970" observedRunningTime="2026-02-16 11:31:08.053109105 +0000 UTC m=+1457.682443280" watchObservedRunningTime="2026-02-16 11:31:08.109239697 +0000 UTC m=+1457.738573862" Feb 16 11:31:08 crc kubenswrapper[4949]: I0216 11:31:08.123423 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-6dd8f56bbd-txvgd" podStartSLOduration=5.123398531 podStartE2EDuration="5.123398531s" podCreationTimestamp="2026-02-16 11:31:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 
11:31:08.077234844 +0000 UTC m=+1457.706569009" watchObservedRunningTime="2026-02-16 11:31:08.123398531 +0000 UTC m=+1457.752732696" Feb 16 11:31:09 crc kubenswrapper[4949]: I0216 11:31:09.052856 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6dd8f56bbd-txvgd" event={"ID":"1a5a86a0-f27c-4c48-a9fb-2dfdca066751","Type":"ContainerStarted","Data":"9d160b65b405f9efcc7626603f66008892d82858b667efe6fa62fde5134ddb84"} Feb 16 11:31:09 crc kubenswrapper[4949]: I0216 11:31:09.056528 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-778c8b6f4c-g5qht" event={"ID":"93df10b9-3ead-478c-9dd0-d7fbd3242ddb","Type":"ContainerStarted","Data":"e346b0c500dffe767c26dd615243a95e3628b0906861d6aae148302a7ac533ad"} Feb 16 11:31:09 crc kubenswrapper[4949]: I0216 11:31:09.087800 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-778c8b6f4c-g5qht" podStartSLOduration=4.876430754 podStartE2EDuration="11.087769116s" podCreationTimestamp="2026-02-16 11:30:58 +0000 UTC" firstStartedPulling="2026-02-16 11:31:00.304020582 +0000 UTC m=+1449.933354747" lastFinishedPulling="2026-02-16 11:31:06.515358944 +0000 UTC m=+1456.144693109" observedRunningTime="2026-02-16 11:31:09.082075344 +0000 UTC m=+1458.711409529" watchObservedRunningTime="2026-02-16 11:31:09.087769116 +0000 UTC m=+1458.717103281" Feb 16 11:31:09 crc kubenswrapper[4949]: I0216 11:31:09.088209 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-75c8ddd69c-ttzj9" Feb 16 11:31:09 crc kubenswrapper[4949]: I0216 11:31:09.197580 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-84b966f6c9-blz5m"] Feb 16 11:31:09 crc kubenswrapper[4949]: I0216 11:31:09.197902 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-84b966f6c9-blz5m" podUID="24bc9e69-eda6-45c5-b43a-0dbe44f2d12d" containerName="dnsmasq-dns" containerID="cri-o://9293d69619cae7dbeacdd64580a5b3ce36088a87324edcad473f241a5a539d97" gracePeriod=10 Feb 16 11:31:09 crc kubenswrapper[4949]: I0216 11:31:09.266134 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aa9e92a7-29ce-4802-961f-3ab63430f40e" path="/var/lib/kubelet/pods/aa9e92a7-29ce-4802-961f-3ab63430f40e/volumes" Feb 16 11:31:09 crc kubenswrapper[4949]: I0216 11:31:09.880162 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-84b966f6c9-blz5m" Feb 16 11:31:10 crc kubenswrapper[4949]: I0216 11:31:10.014342 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/24bc9e69-eda6-45c5-b43a-0dbe44f2d12d-ovsdbserver-sb\") pod \"24bc9e69-eda6-45c5-b43a-0dbe44f2d12d\" (UID: \"24bc9e69-eda6-45c5-b43a-0dbe44f2d12d\") " Feb 16 11:31:10 crc kubenswrapper[4949]: I0216 11:31:10.014514 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g8x7c\" (UniqueName: \"kubernetes.io/projected/24bc9e69-eda6-45c5-b43a-0dbe44f2d12d-kube-api-access-g8x7c\") pod \"24bc9e69-eda6-45c5-b43a-0dbe44f2d12d\" (UID: \"24bc9e69-eda6-45c5-b43a-0dbe44f2d12d\") " Feb 16 11:31:10 crc kubenswrapper[4949]: I0216 11:31:10.014685 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/24bc9e69-eda6-45c5-b43a-0dbe44f2d12d-config\") pod \"24bc9e69-eda6-45c5-b43a-0dbe44f2d12d\" (UID: \"24bc9e69-eda6-45c5-b43a-0dbe44f2d12d\") " Feb 16 11:31:10 crc kubenswrapper[4949]: I0216 11:31:10.014767 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/24bc9e69-eda6-45c5-b43a-0dbe44f2d12d-ovsdbserver-nb\") pod \"24bc9e69-eda6-45c5-b43a-0dbe44f2d12d\" (UID: \"24bc9e69-eda6-45c5-b43a-0dbe44f2d12d\") " Feb 16 11:31:10 crc kubenswrapper[4949]: I0216 11:31:10.014816 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/24bc9e69-eda6-45c5-b43a-0dbe44f2d12d-dns-swift-storage-0\") pod \"24bc9e69-eda6-45c5-b43a-0dbe44f2d12d\" (UID: \"24bc9e69-eda6-45c5-b43a-0dbe44f2d12d\") " Feb 16 11:31:10 crc kubenswrapper[4949]: I0216 11:31:10.015009 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/24bc9e69-eda6-45c5-b43a-0dbe44f2d12d-dns-svc\") pod \"24bc9e69-eda6-45c5-b43a-0dbe44f2d12d\" (UID: \"24bc9e69-eda6-45c5-b43a-0dbe44f2d12d\") " Feb 16 11:31:10 crc kubenswrapper[4949]: I0216 11:31:10.025851 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/24bc9e69-eda6-45c5-b43a-0dbe44f2d12d-kube-api-access-g8x7c" (OuterVolumeSpecName: "kube-api-access-g8x7c") pod "24bc9e69-eda6-45c5-b43a-0dbe44f2d12d" (UID: "24bc9e69-eda6-45c5-b43a-0dbe44f2d12d"). InnerVolumeSpecName "kube-api-access-g8x7c". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:31:10 crc kubenswrapper[4949]: I0216 11:31:10.120364 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g8x7c\" (UniqueName: \"kubernetes.io/projected/24bc9e69-eda6-45c5-b43a-0dbe44f2d12d-kube-api-access-g8x7c\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:10 crc kubenswrapper[4949]: I0216 11:31:10.169068 4949 generic.go:334] "Generic (PLEG): container finished" podID="24bc9e69-eda6-45c5-b43a-0dbe44f2d12d" containerID="9293d69619cae7dbeacdd64580a5b3ce36088a87324edcad473f241a5a539d97" exitCode=0 Feb 16 11:31:10 crc kubenswrapper[4949]: I0216 11:31:10.169700 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-59d764b56d-xgwkg" podUID="8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5" containerName="barbican-keystone-listener-log" containerID="cri-o://3b87fce00b2c7c023e7f41d779803404b3a9c8cd34ed274a57736eb479c3c072" gracePeriod=30 Feb 16 11:31:10 crc kubenswrapper[4949]: I0216 11:31:10.170036 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84b966f6c9-blz5m" Feb 16 11:31:10 crc kubenswrapper[4949]: I0216 11:31:10.170997 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84b966f6c9-blz5m" event={"ID":"24bc9e69-eda6-45c5-b43a-0dbe44f2d12d","Type":"ContainerDied","Data":"9293d69619cae7dbeacdd64580a5b3ce36088a87324edcad473f241a5a539d97"} Feb 16 11:31:10 crc kubenswrapper[4949]: I0216 11:31:10.171138 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84b966f6c9-blz5m" event={"ID":"24bc9e69-eda6-45c5-b43a-0dbe44f2d12d","Type":"ContainerDied","Data":"3639e576777fbe13a9beac39844bc62e1db9a3f2fd222c8cc3684f8e1eb0214e"} Feb 16 11:31:10 crc kubenswrapper[4949]: I0216 11:31:10.171269 4949 scope.go:117] "RemoveContainer" containerID="9293d69619cae7dbeacdd64580a5b3ce36088a87324edcad473f241a5a539d97" Feb 16 11:31:10 crc kubenswrapper[4949]: I0216 11:31:10.174335 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-59d764b56d-xgwkg" podUID="8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5" containerName="barbican-keystone-listener" containerID="cri-o://787115588af80eca11d98588f198014f1337e7320b4940c5371cd5cca6c98580" gracePeriod=30 Feb 16 11:31:10 crc kubenswrapper[4949]: I0216 11:31:10.174595 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-778c8b6f4c-g5qht" podUID="93df10b9-3ead-478c-9dd0-d7fbd3242ddb" containerName="barbican-worker-log" containerID="cri-o://ac8a17d19f086f2d7cae416be2d1b025d2c92c16a5ceea66e7b15e74dfe8955d" gracePeriod=30 Feb 16 11:31:10 crc kubenswrapper[4949]: I0216 11:31:10.174757 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-778c8b6f4c-g5qht" podUID="93df10b9-3ead-478c-9dd0-d7fbd3242ddb" containerName="barbican-worker" containerID="cri-o://e346b0c500dffe767c26dd615243a95e3628b0906861d6aae148302a7ac533ad" gracePeriod=30 Feb 16 11:31:10 crc kubenswrapper[4949]: I0216 11:31:10.214420 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/24bc9e69-eda6-45c5-b43a-0dbe44f2d12d-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "24bc9e69-eda6-45c5-b43a-0dbe44f2d12d" (UID: "24bc9e69-eda6-45c5-b43a-0dbe44f2d12d"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:31:10 crc kubenswrapper[4949]: I0216 11:31:10.232370 4949 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/24bc9e69-eda6-45c5-b43a-0dbe44f2d12d-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:10 crc kubenswrapper[4949]: I0216 11:31:10.235938 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/24bc9e69-eda6-45c5-b43a-0dbe44f2d12d-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "24bc9e69-eda6-45c5-b43a-0dbe44f2d12d" (UID: "24bc9e69-eda6-45c5-b43a-0dbe44f2d12d"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:31:10 crc kubenswrapper[4949]: I0216 11:31:10.282391 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/24bc9e69-eda6-45c5-b43a-0dbe44f2d12d-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "24bc9e69-eda6-45c5-b43a-0dbe44f2d12d" (UID: "24bc9e69-eda6-45c5-b43a-0dbe44f2d12d"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:31:10 crc kubenswrapper[4949]: I0216 11:31:10.288313 4949 scope.go:117] "RemoveContainer" containerID="aaaaf2bb9720c1477b6b7377b6ac9a845bb97e95f9e4441032d5fb4fa6166b6c" Feb 16 11:31:10 crc kubenswrapper[4949]: I0216 11:31:10.300834 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/24bc9e69-eda6-45c5-b43a-0dbe44f2d12d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "24bc9e69-eda6-45c5-b43a-0dbe44f2d12d" (UID: "24bc9e69-eda6-45c5-b43a-0dbe44f2d12d"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:31:10 crc kubenswrapper[4949]: I0216 11:31:10.339479 4949 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/24bc9e69-eda6-45c5-b43a-0dbe44f2d12d-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:10 crc kubenswrapper[4949]: I0216 11:31:10.339524 4949 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/24bc9e69-eda6-45c5-b43a-0dbe44f2d12d-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:10 crc kubenswrapper[4949]: I0216 11:31:10.339541 4949 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/24bc9e69-eda6-45c5-b43a-0dbe44f2d12d-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:10 crc kubenswrapper[4949]: I0216 11:31:10.365689 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/24bc9e69-eda6-45c5-b43a-0dbe44f2d12d-config" (OuterVolumeSpecName: "config") pod "24bc9e69-eda6-45c5-b43a-0dbe44f2d12d" (UID: "24bc9e69-eda6-45c5-b43a-0dbe44f2d12d"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:31:10 crc kubenswrapper[4949]: I0216 11:31:10.451609 4949 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/24bc9e69-eda6-45c5-b43a-0dbe44f2d12d-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:10 crc kubenswrapper[4949]: I0216 11:31:10.468174 4949 scope.go:117] "RemoveContainer" containerID="9293d69619cae7dbeacdd64580a5b3ce36088a87324edcad473f241a5a539d97" Feb 16 11:31:10 crc kubenswrapper[4949]: E0216 11:31:10.473365 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9293d69619cae7dbeacdd64580a5b3ce36088a87324edcad473f241a5a539d97\": container with ID starting with 9293d69619cae7dbeacdd64580a5b3ce36088a87324edcad473f241a5a539d97 not found: ID does not exist" containerID="9293d69619cae7dbeacdd64580a5b3ce36088a87324edcad473f241a5a539d97" Feb 16 11:31:10 crc kubenswrapper[4949]: I0216 11:31:10.473418 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9293d69619cae7dbeacdd64580a5b3ce36088a87324edcad473f241a5a539d97"} err="failed to get container status \"9293d69619cae7dbeacdd64580a5b3ce36088a87324edcad473f241a5a539d97\": rpc error: code = NotFound desc = could not find container \"9293d69619cae7dbeacdd64580a5b3ce36088a87324edcad473f241a5a539d97\": container with ID starting with 9293d69619cae7dbeacdd64580a5b3ce36088a87324edcad473f241a5a539d97 not found: ID does not exist" Feb 16 11:31:10 crc kubenswrapper[4949]: I0216 11:31:10.473450 4949 scope.go:117] "RemoveContainer" containerID="aaaaf2bb9720c1477b6b7377b6ac9a845bb97e95f9e4441032d5fb4fa6166b6c" Feb 16 11:31:10 crc kubenswrapper[4949]: E0216 11:31:10.478256 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aaaaf2bb9720c1477b6b7377b6ac9a845bb97e95f9e4441032d5fb4fa6166b6c\": container with ID starting with aaaaf2bb9720c1477b6b7377b6ac9a845bb97e95f9e4441032d5fb4fa6166b6c not found: ID does not exist" containerID="aaaaf2bb9720c1477b6b7377b6ac9a845bb97e95f9e4441032d5fb4fa6166b6c" Feb 16 11:31:10 crc kubenswrapper[4949]: I0216 11:31:10.478284 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aaaaf2bb9720c1477b6b7377b6ac9a845bb97e95f9e4441032d5fb4fa6166b6c"} err="failed to get container status \"aaaaf2bb9720c1477b6b7377b6ac9a845bb97e95f9e4441032d5fb4fa6166b6c\": rpc error: code = NotFound desc = could not find container \"aaaaf2bb9720c1477b6b7377b6ac9a845bb97e95f9e4441032d5fb4fa6166b6c\": container with ID starting with aaaaf2bb9720c1477b6b7377b6ac9a845bb97e95f9e4441032d5fb4fa6166b6c not found: ID does not exist" Feb 16 11:31:10 crc kubenswrapper[4949]: I0216 11:31:10.609752 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-84b966f6c9-blz5m"] Feb 16 11:31:10 crc kubenswrapper[4949]: I0216 11:31:10.624352 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-84b966f6c9-blz5m"] Feb 16 11:31:11 crc kubenswrapper[4949]: I0216 11:31:11.204114 4949 generic.go:334] "Generic (PLEG): container finished" podID="93df10b9-3ead-478c-9dd0-d7fbd3242ddb" containerID="e346b0c500dffe767c26dd615243a95e3628b0906861d6aae148302a7ac533ad" exitCode=0 Feb 16 11:31:11 crc kubenswrapper[4949]: I0216 11:31:11.204452 4949 generic.go:334] "Generic (PLEG): container finished" podID="93df10b9-3ead-478c-9dd0-d7fbd3242ddb" 
containerID="ac8a17d19f086f2d7cae416be2d1b025d2c92c16a5ceea66e7b15e74dfe8955d" exitCode=143 Feb 16 11:31:11 crc kubenswrapper[4949]: I0216 11:31:11.204358 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-778c8b6f4c-g5qht" event={"ID":"93df10b9-3ead-478c-9dd0-d7fbd3242ddb","Type":"ContainerDied","Data":"e346b0c500dffe767c26dd615243a95e3628b0906861d6aae148302a7ac533ad"} Feb 16 11:31:11 crc kubenswrapper[4949]: I0216 11:31:11.204585 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-778c8b6f4c-g5qht" event={"ID":"93df10b9-3ead-478c-9dd0-d7fbd3242ddb","Type":"ContainerDied","Data":"ac8a17d19f086f2d7cae416be2d1b025d2c92c16a5ceea66e7b15e74dfe8955d"} Feb 16 11:31:11 crc kubenswrapper[4949]: I0216 11:31:11.231188 4949 generic.go:334] "Generic (PLEG): container finished" podID="8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5" containerID="787115588af80eca11d98588f198014f1337e7320b4940c5371cd5cca6c98580" exitCode=0 Feb 16 11:31:11 crc kubenswrapper[4949]: I0216 11:31:11.231229 4949 generic.go:334] "Generic (PLEG): container finished" podID="8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5" containerID="3b87fce00b2c7c023e7f41d779803404b3a9c8cd34ed274a57736eb479c3c072" exitCode=143 Feb 16 11:31:11 crc kubenswrapper[4949]: I0216 11:31:11.231377 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-59d764b56d-xgwkg" event={"ID":"8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5","Type":"ContainerDied","Data":"787115588af80eca11d98588f198014f1337e7320b4940c5371cd5cca6c98580"} Feb 16 11:31:11 crc kubenswrapper[4949]: I0216 11:31:11.231414 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-59d764b56d-xgwkg" event={"ID":"8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5","Type":"ContainerDied","Data":"3b87fce00b2c7c023e7f41d779803404b3a9c8cd34ed274a57736eb479c3c072"} Feb 16 11:31:11 crc kubenswrapper[4949]: I0216 11:31:11.239589 4949 generic.go:334] "Generic (PLEG): container finished" podID="4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f" containerID="b0a479d1b097c38dd3d6e4a671dd77bb73bff6f13bf66a1d12aacfdaded92d57" exitCode=0 Feb 16 11:31:11 crc kubenswrapper[4949]: I0216 11:31:11.267048 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="24bc9e69-eda6-45c5-b43a-0dbe44f2d12d" path="/var/lib/kubelet/pods/24bc9e69-eda6-45c5-b43a-0dbe44f2d12d/volumes" Feb 16 11:31:11 crc kubenswrapper[4949]: I0216 11:31:11.268681 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-vh5z2" event={"ID":"4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f","Type":"ContainerDied","Data":"b0a479d1b097c38dd3d6e4a671dd77bb73bff6f13bf66a1d12aacfdaded92d57"} Feb 16 11:31:12 crc kubenswrapper[4949]: I0216 11:31:12.131698 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-86cc5fd6cd-qvlrg" Feb 16 11:31:12 crc kubenswrapper[4949]: I0216 11:31:12.299454 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-86cc5fd6cd-qvlrg" Feb 16 11:31:12 crc kubenswrapper[4949]: I0216 11:31:12.898222 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-g76dr" podUID="acc263a1-4f57-4dca-bcc5-5d5388539a5d" containerName="registry-server" probeResult="failure" output=< Feb 16 11:31:12 crc kubenswrapper[4949]: timeout: failed to connect service ":50051" within 1s Feb 16 11:31:12 crc kubenswrapper[4949]: > Feb 16 11:31:15 crc kubenswrapper[4949]: I0216 11:31:15.785463 4949 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-6dd8f56bbd-txvgd" Feb 16 11:31:16 crc kubenswrapper[4949]: I0216 11:31:16.020967 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-6dd8f56bbd-txvgd" Feb 16 11:31:16 crc kubenswrapper[4949]: I0216 11:31:16.031107 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-vh5z2" Feb 16 11:31:16 crc kubenswrapper[4949]: I0216 11:31:16.127678 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-86cc5fd6cd-qvlrg"] Feb 16 11:31:16 crc kubenswrapper[4949]: I0216 11:31:16.127939 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-86cc5fd6cd-qvlrg" podUID="52108945-9ec3-4a39-9f9b-e6a79ea4adc7" containerName="barbican-api-log" containerID="cri-o://55a092b2cea8fc5e598040cf4e4f820a46c707bfb22ce7c716d47bc1eeae98d7" gracePeriod=30 Feb 16 11:31:16 crc kubenswrapper[4949]: I0216 11:31:16.128584 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-86cc5fd6cd-qvlrg" podUID="52108945-9ec3-4a39-9f9b-e6a79ea4adc7" containerName="barbican-api" containerID="cri-o://d89822835d4f34c6207defb6ae5a2a3e0ed5115d6ebc11b0a6557517faf6ef24" gracePeriod=30 Feb 16 11:31:16 crc kubenswrapper[4949]: I0216 11:31:16.138763 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f-scripts\") pod \"4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f\" (UID: \"4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f\") " Feb 16 11:31:16 crc kubenswrapper[4949]: I0216 11:31:16.138838 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vsjfm\" (UniqueName: \"kubernetes.io/projected/4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f-kube-api-access-vsjfm\") pod \"4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f\" (UID: \"4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f\") " Feb 16 11:31:16 crc kubenswrapper[4949]: I0216 11:31:16.138871 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f-db-sync-config-data\") pod \"4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f\" (UID: \"4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f\") " Feb 16 11:31:16 crc kubenswrapper[4949]: I0216 11:31:16.139108 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f-combined-ca-bundle\") pod \"4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f\" (UID: \"4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f\") " Feb 16 11:31:16 crc kubenswrapper[4949]: I0216 11:31:16.139133 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f-etc-machine-id\") pod \"4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f\" (UID: \"4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f\") " Feb 16 11:31:16 crc kubenswrapper[4949]: I0216 11:31:16.139226 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f-config-data\") pod \"4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f\" (UID: \"4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f\") " Feb 16 11:31:16 crc kubenswrapper[4949]: I0216 
11:31:16.146312 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-86cc5fd6cd-qvlrg" podUID="52108945-9ec3-4a39-9f9b-e6a79ea4adc7" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.201:9311/healthcheck\": EOF" Feb 16 11:31:16 crc kubenswrapper[4949]: I0216 11:31:16.146727 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f" (UID: "4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 16 11:31:16 crc kubenswrapper[4949]: I0216 11:31:16.152677 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f-scripts" (OuterVolumeSpecName: "scripts") pod "4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f" (UID: "4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:31:16 crc kubenswrapper[4949]: I0216 11:31:16.157620 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f-kube-api-access-vsjfm" (OuterVolumeSpecName: "kube-api-access-vsjfm") pod "4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f" (UID: "4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f"). InnerVolumeSpecName "kube-api-access-vsjfm". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:31:16 crc kubenswrapper[4949]: I0216 11:31:16.168375 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f" (UID: "4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:31:16 crc kubenswrapper[4949]: I0216 11:31:16.239830 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f" (UID: "4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:31:16 crc kubenswrapper[4949]: I0216 11:31:16.251252 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:16 crc kubenswrapper[4949]: I0216 11:31:16.251285 4949 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f-etc-machine-id\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:16 crc kubenswrapper[4949]: I0216 11:31:16.251296 4949 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f-scripts\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:16 crc kubenswrapper[4949]: I0216 11:31:16.251305 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vsjfm\" (UniqueName: \"kubernetes.io/projected/4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f-kube-api-access-vsjfm\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:16 crc kubenswrapper[4949]: I0216 11:31:16.251315 4949 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:16 crc kubenswrapper[4949]: I0216 11:31:16.272994 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f-config-data" (OuterVolumeSpecName: "config-data") pod "4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f" (UID: "4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:31:16 crc kubenswrapper[4949]: I0216 11:31:16.351136 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-vh5z2" Feb 16 11:31:16 crc kubenswrapper[4949]: I0216 11:31:16.352108 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-vh5z2" event={"ID":"4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f","Type":"ContainerDied","Data":"81057128bb902c4a82b2d4b8fd47d86f6c7f19db48cd55facbecaf8989423567"} Feb 16 11:31:16 crc kubenswrapper[4949]: I0216 11:31:16.352293 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="81057128bb902c4a82b2d4b8fd47d86f6c7f19db48cd55facbecaf8989423567" Feb 16 11:31:16 crc kubenswrapper[4949]: I0216 11:31:16.357274 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f-config-data\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:16 crc kubenswrapper[4949]: I0216 11:31:16.390385 4949 generic.go:334] "Generic (PLEG): container finished" podID="52108945-9ec3-4a39-9f9b-e6a79ea4adc7" containerID="55a092b2cea8fc5e598040cf4e4f820a46c707bfb22ce7c716d47bc1eeae98d7" exitCode=143 Feb 16 11:31:16 crc kubenswrapper[4949]: I0216 11:31:16.390566 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-86cc5fd6cd-qvlrg" event={"ID":"52108945-9ec3-4a39-9f9b-e6a79ea4adc7","Type":"ContainerDied","Data":"55a092b2cea8fc5e598040cf4e4f820a46c707bfb22ce7c716d47bc1eeae98d7"} Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.261590 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-7f6ff59f84-j4tfn" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.440222 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Feb 16 11:31:17 crc kubenswrapper[4949]: E0216 11:31:17.441103 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa9e92a7-29ce-4802-961f-3ab63430f40e" containerName="barbican-api-log" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.441121 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa9e92a7-29ce-4802-961f-3ab63430f40e" containerName="barbican-api-log" Feb 16 11:31:17 crc kubenswrapper[4949]: E0216 11:31:17.441138 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24bc9e69-eda6-45c5-b43a-0dbe44f2d12d" containerName="dnsmasq-dns" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.441144 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="24bc9e69-eda6-45c5-b43a-0dbe44f2d12d" containerName="dnsmasq-dns" Feb 16 11:31:17 crc kubenswrapper[4949]: E0216 11:31:17.441162 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24bc9e69-eda6-45c5-b43a-0dbe44f2d12d" containerName="init" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.441184 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="24bc9e69-eda6-45c5-b43a-0dbe44f2d12d" containerName="init" Feb 16 11:31:17 crc kubenswrapper[4949]: E0216 11:31:17.441195 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa9e92a7-29ce-4802-961f-3ab63430f40e" containerName="barbican-api" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.441200 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa9e92a7-29ce-4802-961f-3ab63430f40e" containerName="barbican-api" Feb 16 11:31:17 crc kubenswrapper[4949]: E0216 11:31:17.441248 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f" containerName="cinder-db-sync" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 
11:31:17.441257 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f" containerName="cinder-db-sync" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.441510 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="aa9e92a7-29ce-4802-961f-3ab63430f40e" containerName="barbican-api" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.441523 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="24bc9e69-eda6-45c5-b43a-0dbe44f2d12d" containerName="dnsmasq-dns" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.441535 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="aa9e92a7-29ce-4802-961f-3ab63430f40e" containerName="barbican-api-log" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.441547 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="4e3e27fc-6ea1-4d2a-b9a9-22ec9abb2f3f" containerName="cinder-db-sync" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.442999 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.447679 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.450633 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-x8kqq" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.455803 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.456039 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.480884 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.551059 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-f2c9s"] Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.553276 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5784cf869f-f2c9s" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.579975 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-f2c9s"] Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.615310 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/de507279-e660-4141-becd-e2e55408a30d-scripts\") pod \"cinder-scheduler-0\" (UID: \"de507279-e660-4141-becd-e2e55408a30d\") " pod="openstack/cinder-scheduler-0" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.615397 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hlmww\" (UniqueName: \"kubernetes.io/projected/de507279-e660-4141-becd-e2e55408a30d-kube-api-access-hlmww\") pod \"cinder-scheduler-0\" (UID: \"de507279-e660-4141-becd-e2e55408a30d\") " pod="openstack/cinder-scheduler-0" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.615458 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de507279-e660-4141-becd-e2e55408a30d-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"de507279-e660-4141-becd-e2e55408a30d\") " pod="openstack/cinder-scheduler-0" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.615546 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/de507279-e660-4141-becd-e2e55408a30d-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"de507279-e660-4141-becd-e2e55408a30d\") " pod="openstack/cinder-scheduler-0" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.615722 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de507279-e660-4141-becd-e2e55408a30d-config-data\") pod \"cinder-scheduler-0\" (UID: \"de507279-e660-4141-becd-e2e55408a30d\") " pod="openstack/cinder-scheduler-0" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.615774 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/de507279-e660-4141-becd-e2e55408a30d-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"de507279-e660-4141-becd-e2e55408a30d\") " pod="openstack/cinder-scheduler-0" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.713054 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.717034 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.718485 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de507279-e660-4141-becd-e2e55408a30d-config-data\") pod \"cinder-scheduler-0\" (UID: \"de507279-e660-4141-becd-e2e55408a30d\") " pod="openstack/cinder-scheduler-0" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.718562 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2345d1ab-b1c1-4959-a5ac-5da97eda36e3-ovsdbserver-nb\") pod \"dnsmasq-dns-5784cf869f-f2c9s\" (UID: \"2345d1ab-b1c1-4959-a5ac-5da97eda36e3\") " pod="openstack/dnsmasq-dns-5784cf869f-f2c9s" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.718627 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/de507279-e660-4141-becd-e2e55408a30d-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"de507279-e660-4141-becd-e2e55408a30d\") " pod="openstack/cinder-scheduler-0" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.718732 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/de507279-e660-4141-becd-e2e55408a30d-scripts\") pod \"cinder-scheduler-0\" (UID: \"de507279-e660-4141-becd-e2e55408a30d\") " pod="openstack/cinder-scheduler-0" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.718768 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hlmww\" (UniqueName: \"kubernetes.io/projected/de507279-e660-4141-becd-e2e55408a30d-kube-api-access-hlmww\") pod \"cinder-scheduler-0\" (UID: \"de507279-e660-4141-becd-e2e55408a30d\") " pod="openstack/cinder-scheduler-0" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.718810 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de507279-e660-4141-becd-e2e55408a30d-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"de507279-e660-4141-becd-e2e55408a30d\") " pod="openstack/cinder-scheduler-0" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.718857 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xrhl4\" (UniqueName: \"kubernetes.io/projected/2345d1ab-b1c1-4959-a5ac-5da97eda36e3-kube-api-access-xrhl4\") pod \"dnsmasq-dns-5784cf869f-f2c9s\" (UID: \"2345d1ab-b1c1-4959-a5ac-5da97eda36e3\") " pod="openstack/dnsmasq-dns-5784cf869f-f2c9s" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.718894 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2345d1ab-b1c1-4959-a5ac-5da97eda36e3-dns-svc\") pod \"dnsmasq-dns-5784cf869f-f2c9s\" (UID: \"2345d1ab-b1c1-4959-a5ac-5da97eda36e3\") " pod="openstack/dnsmasq-dns-5784cf869f-f2c9s" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.718922 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/de507279-e660-4141-becd-e2e55408a30d-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"de507279-e660-4141-becd-e2e55408a30d\") " pod="openstack/cinder-scheduler-0" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.718954 
4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2345d1ab-b1c1-4959-a5ac-5da97eda36e3-dns-swift-storage-0\") pod \"dnsmasq-dns-5784cf869f-f2c9s\" (UID: \"2345d1ab-b1c1-4959-a5ac-5da97eda36e3\") " pod="openstack/dnsmasq-dns-5784cf869f-f2c9s" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.719048 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2345d1ab-b1c1-4959-a5ac-5da97eda36e3-ovsdbserver-sb\") pod \"dnsmasq-dns-5784cf869f-f2c9s\" (UID: \"2345d1ab-b1c1-4959-a5ac-5da97eda36e3\") " pod="openstack/dnsmasq-dns-5784cf869f-f2c9s" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.719081 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2345d1ab-b1c1-4959-a5ac-5da97eda36e3-config\") pod \"dnsmasq-dns-5784cf869f-f2c9s\" (UID: \"2345d1ab-b1c1-4959-a5ac-5da97eda36e3\") " pod="openstack/dnsmasq-dns-5784cf869f-f2c9s" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.720503 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.720675 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/de507279-e660-4141-becd-e2e55408a30d-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"de507279-e660-4141-becd-e2e55408a30d\") " pod="openstack/cinder-scheduler-0" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.733723 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de507279-e660-4141-becd-e2e55408a30d-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"de507279-e660-4141-becd-e2e55408a30d\") " pod="openstack/cinder-scheduler-0" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.734741 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/de507279-e660-4141-becd-e2e55408a30d-scripts\") pod \"cinder-scheduler-0\" (UID: \"de507279-e660-4141-becd-e2e55408a30d\") " pod="openstack/cinder-scheduler-0" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.751831 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/de507279-e660-4141-becd-e2e55408a30d-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"de507279-e660-4141-becd-e2e55408a30d\") " pod="openstack/cinder-scheduler-0" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.757662 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hlmww\" (UniqueName: \"kubernetes.io/projected/de507279-e660-4141-becd-e2e55408a30d-kube-api-access-hlmww\") pod \"cinder-scheduler-0\" (UID: \"de507279-e660-4141-becd-e2e55408a30d\") " pod="openstack/cinder-scheduler-0" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.766010 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.770678 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de507279-e660-4141-becd-e2e55408a30d-config-data\") pod \"cinder-scheduler-0\" (UID: 
\"de507279-e660-4141-becd-e2e55408a30d\") " pod="openstack/cinder-scheduler-0" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.786856 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.821046 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2345d1ab-b1c1-4959-a5ac-5da97eda36e3-ovsdbserver-nb\") pod \"dnsmasq-dns-5784cf869f-f2c9s\" (UID: \"2345d1ab-b1c1-4959-a5ac-5da97eda36e3\") " pod="openstack/dnsmasq-dns-5784cf869f-f2c9s" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.821107 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9dd500a3-a7c1-4d0a-9c63-a2855b645f06-scripts\") pod \"cinder-api-0\" (UID: \"9dd500a3-a7c1-4d0a-9c63-a2855b645f06\") " pod="openstack/cinder-api-0" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.821263 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9dd500a3-a7c1-4d0a-9c63-a2855b645f06-logs\") pod \"cinder-api-0\" (UID: \"9dd500a3-a7c1-4d0a-9c63-a2855b645f06\") " pod="openstack/cinder-api-0" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.821332 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xrhl4\" (UniqueName: \"kubernetes.io/projected/2345d1ab-b1c1-4959-a5ac-5da97eda36e3-kube-api-access-xrhl4\") pod \"dnsmasq-dns-5784cf869f-f2c9s\" (UID: \"2345d1ab-b1c1-4959-a5ac-5da97eda36e3\") " pod="openstack/dnsmasq-dns-5784cf869f-f2c9s" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.821366 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2345d1ab-b1c1-4959-a5ac-5da97eda36e3-dns-svc\") pod \"dnsmasq-dns-5784cf869f-f2c9s\" (UID: \"2345d1ab-b1c1-4959-a5ac-5da97eda36e3\") " pod="openstack/dnsmasq-dns-5784cf869f-f2c9s" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.821385 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9dd500a3-a7c1-4d0a-9c63-a2855b645f06-config-data\") pod \"cinder-api-0\" (UID: \"9dd500a3-a7c1-4d0a-9c63-a2855b645f06\") " pod="openstack/cinder-api-0" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.821406 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9dd500a3-a7c1-4d0a-9c63-a2855b645f06-etc-machine-id\") pod \"cinder-api-0\" (UID: \"9dd500a3-a7c1-4d0a-9c63-a2855b645f06\") " pod="openstack/cinder-api-0" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.821433 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2345d1ab-b1c1-4959-a5ac-5da97eda36e3-dns-swift-storage-0\") pod \"dnsmasq-dns-5784cf869f-f2c9s\" (UID: \"2345d1ab-b1c1-4959-a5ac-5da97eda36e3\") " pod="openstack/dnsmasq-dns-5784cf869f-f2c9s" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.821512 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/9dd500a3-a7c1-4d0a-9c63-a2855b645f06-config-data-custom\") pod \"cinder-api-0\" (UID: \"9dd500a3-a7c1-4d0a-9c63-a2855b645f06\") " pod="openstack/cinder-api-0" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.821541 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2345d1ab-b1c1-4959-a5ac-5da97eda36e3-ovsdbserver-sb\") pod \"dnsmasq-dns-5784cf869f-f2c9s\" (UID: \"2345d1ab-b1c1-4959-a5ac-5da97eda36e3\") " pod="openstack/dnsmasq-dns-5784cf869f-f2c9s" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.821565 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2345d1ab-b1c1-4959-a5ac-5da97eda36e3-config\") pod \"dnsmasq-dns-5784cf869f-f2c9s\" (UID: \"2345d1ab-b1c1-4959-a5ac-5da97eda36e3\") " pod="openstack/dnsmasq-dns-5784cf869f-f2c9s" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.821598 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9dd500a3-a7c1-4d0a-9c63-a2855b645f06-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"9dd500a3-a7c1-4d0a-9c63-a2855b645f06\") " pod="openstack/cinder-api-0" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.821631 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gmshd\" (UniqueName: \"kubernetes.io/projected/9dd500a3-a7c1-4d0a-9c63-a2855b645f06-kube-api-access-gmshd\") pod \"cinder-api-0\" (UID: \"9dd500a3-a7c1-4d0a-9c63-a2855b645f06\") " pod="openstack/cinder-api-0" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.822487 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2345d1ab-b1c1-4959-a5ac-5da97eda36e3-ovsdbserver-nb\") pod \"dnsmasq-dns-5784cf869f-f2c9s\" (UID: \"2345d1ab-b1c1-4959-a5ac-5da97eda36e3\") " pod="openstack/dnsmasq-dns-5784cf869f-f2c9s" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.825486 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2345d1ab-b1c1-4959-a5ac-5da97eda36e3-dns-swift-storage-0\") pod \"dnsmasq-dns-5784cf869f-f2c9s\" (UID: \"2345d1ab-b1c1-4959-a5ac-5da97eda36e3\") " pod="openstack/dnsmasq-dns-5784cf869f-f2c9s" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.825986 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2345d1ab-b1c1-4959-a5ac-5da97eda36e3-dns-svc\") pod \"dnsmasq-dns-5784cf869f-f2c9s\" (UID: \"2345d1ab-b1c1-4959-a5ac-5da97eda36e3\") " pod="openstack/dnsmasq-dns-5784cf869f-f2c9s" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.830064 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2345d1ab-b1c1-4959-a5ac-5da97eda36e3-ovsdbserver-sb\") pod \"dnsmasq-dns-5784cf869f-f2c9s\" (UID: \"2345d1ab-b1c1-4959-a5ac-5da97eda36e3\") " pod="openstack/dnsmasq-dns-5784cf869f-f2c9s" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.830659 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2345d1ab-b1c1-4959-a5ac-5da97eda36e3-config\") pod \"dnsmasq-dns-5784cf869f-f2c9s\" (UID: \"2345d1ab-b1c1-4959-a5ac-5da97eda36e3\") " 
pod="openstack/dnsmasq-dns-5784cf869f-f2c9s" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.844943 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-74f8bcc545-j46d5"] Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.845368 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-74f8bcc545-j46d5" podUID="b7171cf3-1339-4609-bf59-f9a3777bb15c" containerName="neutron-api" containerID="cri-o://81542eacf05937369a012bc31d2f24fe8e1bbd60b859989367e99bfdf914d1a1" gracePeriod=30 Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.846297 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-74f8bcc545-j46d5" podUID="b7171cf3-1339-4609-bf59-f9a3777bb15c" containerName="neutron-httpd" containerID="cri-o://2447ec3101884bb3a4a65d8a5bb4e65c0eea62a71a0577b852d152d4e8899067" gracePeriod=30 Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.867680 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xrhl4\" (UniqueName: \"kubernetes.io/projected/2345d1ab-b1c1-4959-a5ac-5da97eda36e3-kube-api-access-xrhl4\") pod \"dnsmasq-dns-5784cf869f-f2c9s\" (UID: \"2345d1ab-b1c1-4959-a5ac-5da97eda36e3\") " pod="openstack/dnsmasq-dns-5784cf869f-f2c9s" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.898364 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5784cf869f-f2c9s" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.923702 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9dd500a3-a7c1-4d0a-9c63-a2855b645f06-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"9dd500a3-a7c1-4d0a-9c63-a2855b645f06\") " pod="openstack/cinder-api-0" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.923768 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gmshd\" (UniqueName: \"kubernetes.io/projected/9dd500a3-a7c1-4d0a-9c63-a2855b645f06-kube-api-access-gmshd\") pod \"cinder-api-0\" (UID: \"9dd500a3-a7c1-4d0a-9c63-a2855b645f06\") " pod="openstack/cinder-api-0" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.923799 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9dd500a3-a7c1-4d0a-9c63-a2855b645f06-scripts\") pod \"cinder-api-0\" (UID: \"9dd500a3-a7c1-4d0a-9c63-a2855b645f06\") " pod="openstack/cinder-api-0" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.923898 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9dd500a3-a7c1-4d0a-9c63-a2855b645f06-logs\") pod \"cinder-api-0\" (UID: \"9dd500a3-a7c1-4d0a-9c63-a2855b645f06\") " pod="openstack/cinder-api-0" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.923955 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9dd500a3-a7c1-4d0a-9c63-a2855b645f06-config-data\") pod \"cinder-api-0\" (UID: \"9dd500a3-a7c1-4d0a-9c63-a2855b645f06\") " pod="openstack/cinder-api-0" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.923986 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9dd500a3-a7c1-4d0a-9c63-a2855b645f06-etc-machine-id\") pod \"cinder-api-0\" (UID: 
\"9dd500a3-a7c1-4d0a-9c63-a2855b645f06\") " pod="openstack/cinder-api-0" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.924066 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9dd500a3-a7c1-4d0a-9c63-a2855b645f06-config-data-custom\") pod \"cinder-api-0\" (UID: \"9dd500a3-a7c1-4d0a-9c63-a2855b645f06\") " pod="openstack/cinder-api-0" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.929879 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9dd500a3-a7c1-4d0a-9c63-a2855b645f06-logs\") pod \"cinder-api-0\" (UID: \"9dd500a3-a7c1-4d0a-9c63-a2855b645f06\") " pod="openstack/cinder-api-0" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.930096 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9dd500a3-a7c1-4d0a-9c63-a2855b645f06-etc-machine-id\") pod \"cinder-api-0\" (UID: \"9dd500a3-a7c1-4d0a-9c63-a2855b645f06\") " pod="openstack/cinder-api-0" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.936585 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-cff99c4df-6j2pt"] Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.938983 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-cff99c4df-6j2pt" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.941792 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9dd500a3-a7c1-4d0a-9c63-a2855b645f06-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"9dd500a3-a7c1-4d0a-9c63-a2855b645f06\") " pod="openstack/cinder-api-0" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.942380 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9dd500a3-a7c1-4d0a-9c63-a2855b645f06-config-data-custom\") pod \"cinder-api-0\" (UID: \"9dd500a3-a7c1-4d0a-9c63-a2855b645f06\") " pod="openstack/cinder-api-0" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.945010 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9dd500a3-a7c1-4d0a-9c63-a2855b645f06-scripts\") pod \"cinder-api-0\" (UID: \"9dd500a3-a7c1-4d0a-9c63-a2855b645f06\") " pod="openstack/cinder-api-0" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.952281 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9dd500a3-a7c1-4d0a-9c63-a2855b645f06-config-data\") pod \"cinder-api-0\" (UID: \"9dd500a3-a7c1-4d0a-9c63-a2855b645f06\") " pod="openstack/cinder-api-0" Feb 16 11:31:17 crc kubenswrapper[4949]: I0216 11:31:17.957779 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gmshd\" (UniqueName: \"kubernetes.io/projected/9dd500a3-a7c1-4d0a-9c63-a2855b645f06-kube-api-access-gmshd\") pod \"cinder-api-0\" (UID: \"9dd500a3-a7c1-4d0a-9c63-a2855b645f06\") " pod="openstack/cinder-api-0" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.001239 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-cff99c4df-6j2pt"] Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.027387 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/9dcea626-54bc-4dba-a5d5-6df79c77216a-public-tls-certs\") pod \"neutron-cff99c4df-6j2pt\" (UID: \"9dcea626-54bc-4dba-a5d5-6df79c77216a\") " pod="openstack/neutron-cff99c4df-6j2pt" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.027491 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9dcea626-54bc-4dba-a5d5-6df79c77216a-combined-ca-bundle\") pod \"neutron-cff99c4df-6j2pt\" (UID: \"9dcea626-54bc-4dba-a5d5-6df79c77216a\") " pod="openstack/neutron-cff99c4df-6j2pt" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.027545 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/9dcea626-54bc-4dba-a5d5-6df79c77216a-config\") pod \"neutron-cff99c4df-6j2pt\" (UID: \"9dcea626-54bc-4dba-a5d5-6df79c77216a\") " pod="openstack/neutron-cff99c4df-6j2pt" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.027590 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2pn27\" (UniqueName: \"kubernetes.io/projected/9dcea626-54bc-4dba-a5d5-6df79c77216a-kube-api-access-2pn27\") pod \"neutron-cff99c4df-6j2pt\" (UID: \"9dcea626-54bc-4dba-a5d5-6df79c77216a\") " pod="openstack/neutron-cff99c4df-6j2pt" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.027783 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/9dcea626-54bc-4dba-a5d5-6df79c77216a-httpd-config\") pod \"neutron-cff99c4df-6j2pt\" (UID: \"9dcea626-54bc-4dba-a5d5-6df79c77216a\") " pod="openstack/neutron-cff99c4df-6j2pt" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.027961 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/9dcea626-54bc-4dba-a5d5-6df79c77216a-ovndb-tls-certs\") pod \"neutron-cff99c4df-6j2pt\" (UID: \"9dcea626-54bc-4dba-a5d5-6df79c77216a\") " pod="openstack/neutron-cff99c4df-6j2pt" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.028053 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9dcea626-54bc-4dba-a5d5-6df79c77216a-internal-tls-certs\") pod \"neutron-cff99c4df-6j2pt\" (UID: \"9dcea626-54bc-4dba-a5d5-6df79c77216a\") " pod="openstack/neutron-cff99c4df-6j2pt" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.032479 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.131802 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9dcea626-54bc-4dba-a5d5-6df79c77216a-public-tls-certs\") pod \"neutron-cff99c4df-6j2pt\" (UID: \"9dcea626-54bc-4dba-a5d5-6df79c77216a\") " pod="openstack/neutron-cff99c4df-6j2pt" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.132733 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9dcea626-54bc-4dba-a5d5-6df79c77216a-combined-ca-bundle\") pod \"neutron-cff99c4df-6j2pt\" (UID: \"9dcea626-54bc-4dba-a5d5-6df79c77216a\") " pod="openstack/neutron-cff99c4df-6j2pt" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.132850 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/9dcea626-54bc-4dba-a5d5-6df79c77216a-config\") pod \"neutron-cff99c4df-6j2pt\" (UID: \"9dcea626-54bc-4dba-a5d5-6df79c77216a\") " pod="openstack/neutron-cff99c4df-6j2pt" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.134449 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2pn27\" (UniqueName: \"kubernetes.io/projected/9dcea626-54bc-4dba-a5d5-6df79c77216a-kube-api-access-2pn27\") pod \"neutron-cff99c4df-6j2pt\" (UID: \"9dcea626-54bc-4dba-a5d5-6df79c77216a\") " pod="openstack/neutron-cff99c4df-6j2pt" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.134549 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/9dcea626-54bc-4dba-a5d5-6df79c77216a-httpd-config\") pod \"neutron-cff99c4df-6j2pt\" (UID: \"9dcea626-54bc-4dba-a5d5-6df79c77216a\") " pod="openstack/neutron-cff99c4df-6j2pt" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.134708 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/9dcea626-54bc-4dba-a5d5-6df79c77216a-ovndb-tls-certs\") pod \"neutron-cff99c4df-6j2pt\" (UID: \"9dcea626-54bc-4dba-a5d5-6df79c77216a\") " pod="openstack/neutron-cff99c4df-6j2pt" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.134843 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9dcea626-54bc-4dba-a5d5-6df79c77216a-internal-tls-certs\") pod \"neutron-cff99c4df-6j2pt\" (UID: \"9dcea626-54bc-4dba-a5d5-6df79c77216a\") " pod="openstack/neutron-cff99c4df-6j2pt" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.139332 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9dcea626-54bc-4dba-a5d5-6df79c77216a-combined-ca-bundle\") pod \"neutron-cff99c4df-6j2pt\" (UID: \"9dcea626-54bc-4dba-a5d5-6df79c77216a\") " pod="openstack/neutron-cff99c4df-6j2pt" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.143955 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/9dcea626-54bc-4dba-a5d5-6df79c77216a-config\") pod \"neutron-cff99c4df-6j2pt\" (UID: \"9dcea626-54bc-4dba-a5d5-6df79c77216a\") " pod="openstack/neutron-cff99c4df-6j2pt" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.149789 4949 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9dcea626-54bc-4dba-a5d5-6df79c77216a-public-tls-certs\") pod \"neutron-cff99c4df-6j2pt\" (UID: \"9dcea626-54bc-4dba-a5d5-6df79c77216a\") " pod="openstack/neutron-cff99c4df-6j2pt" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.151249 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9dcea626-54bc-4dba-a5d5-6df79c77216a-internal-tls-certs\") pod \"neutron-cff99c4df-6j2pt\" (UID: \"9dcea626-54bc-4dba-a5d5-6df79c77216a\") " pod="openstack/neutron-cff99c4df-6j2pt" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.154129 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/9dcea626-54bc-4dba-a5d5-6df79c77216a-httpd-config\") pod \"neutron-cff99c4df-6j2pt\" (UID: \"9dcea626-54bc-4dba-a5d5-6df79c77216a\") " pod="openstack/neutron-cff99c4df-6j2pt" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.160593 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/9dcea626-54bc-4dba-a5d5-6df79c77216a-ovndb-tls-certs\") pod \"neutron-cff99c4df-6j2pt\" (UID: \"9dcea626-54bc-4dba-a5d5-6df79c77216a\") " pod="openstack/neutron-cff99c4df-6j2pt" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.161942 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2pn27\" (UniqueName: \"kubernetes.io/projected/9dcea626-54bc-4dba-a5d5-6df79c77216a-kube-api-access-2pn27\") pod \"neutron-cff99c4df-6j2pt\" (UID: \"9dcea626-54bc-4dba-a5d5-6df79c77216a\") " pod="openstack/neutron-cff99c4df-6j2pt" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.164424 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-778c8b6f4c-g5qht" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.226430 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/neutron-74f8bcc545-j46d5" podUID="b7171cf3-1339-4609-bf59-f9a3777bb15c" containerName="neutron-httpd" probeResult="failure" output="Get \"https://10.217.0.195:9696/\": read tcp 10.217.0.2:35328->10.217.0.195:9696: read: connection reset by peer" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.238408 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9vj2d\" (UniqueName: \"kubernetes.io/projected/93df10b9-3ead-478c-9dd0-d7fbd3242ddb-kube-api-access-9vj2d\") pod \"93df10b9-3ead-478c-9dd0-d7fbd3242ddb\" (UID: \"93df10b9-3ead-478c-9dd0-d7fbd3242ddb\") " Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.238446 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93df10b9-3ead-478c-9dd0-d7fbd3242ddb-combined-ca-bundle\") pod \"93df10b9-3ead-478c-9dd0-d7fbd3242ddb\" (UID: \"93df10b9-3ead-478c-9dd0-d7fbd3242ddb\") " Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.238477 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93df10b9-3ead-478c-9dd0-d7fbd3242ddb-config-data\") pod \"93df10b9-3ead-478c-9dd0-d7fbd3242ddb\" (UID: \"93df10b9-3ead-478c-9dd0-d7fbd3242ddb\") " Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.238602 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/93df10b9-3ead-478c-9dd0-d7fbd3242ddb-logs\") pod \"93df10b9-3ead-478c-9dd0-d7fbd3242ddb\" (UID: \"93df10b9-3ead-478c-9dd0-d7fbd3242ddb\") " Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.238676 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/93df10b9-3ead-478c-9dd0-d7fbd3242ddb-config-data-custom\") pod \"93df10b9-3ead-478c-9dd0-d7fbd3242ddb\" (UID: \"93df10b9-3ead-478c-9dd0-d7fbd3242ddb\") " Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.244317 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/93df10b9-3ead-478c-9dd0-d7fbd3242ddb-logs" (OuterVolumeSpecName: "logs") pod "93df10b9-3ead-478c-9dd0-d7fbd3242ddb" (UID: "93df10b9-3ead-478c-9dd0-d7fbd3242ddb"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.250611 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93df10b9-3ead-478c-9dd0-d7fbd3242ddb-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "93df10b9-3ead-478c-9dd0-d7fbd3242ddb" (UID: "93df10b9-3ead-478c-9dd0-d7fbd3242ddb"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.255381 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-59d764b56d-xgwkg" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.268506 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/93df10b9-3ead-478c-9dd0-d7fbd3242ddb-kube-api-access-9vj2d" (OuterVolumeSpecName: "kube-api-access-9vj2d") pod "93df10b9-3ead-478c-9dd0-d7fbd3242ddb" (UID: "93df10b9-3ead-478c-9dd0-d7fbd3242ddb"). InnerVolumeSpecName "kube-api-access-9vj2d". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.333930 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93df10b9-3ead-478c-9dd0-d7fbd3242ddb-config-data" (OuterVolumeSpecName: "config-data") pod "93df10b9-3ead-478c-9dd0-d7fbd3242ddb" (UID: "93df10b9-3ead-478c-9dd0-d7fbd3242ddb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.342866 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4x9wd\" (UniqueName: \"kubernetes.io/projected/8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5-kube-api-access-4x9wd\") pod \"8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5\" (UID: \"8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5\") " Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.342999 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5-config-data\") pod \"8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5\" (UID: \"8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5\") " Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.343038 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5-logs\") pod \"8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5\" (UID: \"8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5\") " Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.344897 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93df10b9-3ead-478c-9dd0-d7fbd3242ddb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "93df10b9-3ead-478c-9dd0-d7fbd3242ddb" (UID: "93df10b9-3ead-478c-9dd0-d7fbd3242ddb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.345434 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5-logs" (OuterVolumeSpecName: "logs") pod "8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5" (UID: "8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.346508 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5-combined-ca-bundle\") pod \"8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5\" (UID: \"8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5\") " Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.346738 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93df10b9-3ead-478c-9dd0-d7fbd3242ddb-combined-ca-bundle\") pod \"93df10b9-3ead-478c-9dd0-d7fbd3242ddb\" (UID: \"93df10b9-3ead-478c-9dd0-d7fbd3242ddb\") " Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.346805 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5-config-data-custom\") pod \"8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5\" (UID: \"8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5\") " Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.348120 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9vj2d\" (UniqueName: \"kubernetes.io/projected/93df10b9-3ead-478c-9dd0-d7fbd3242ddb-kube-api-access-9vj2d\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.348149 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93df10b9-3ead-478c-9dd0-d7fbd3242ddb-config-data\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.348162 4949 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/93df10b9-3ead-478c-9dd0-d7fbd3242ddb-logs\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.348189 4949 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/93df10b9-3ead-478c-9dd0-d7fbd3242ddb-config-data-custom\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.348202 4949 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5-logs\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:18 crc kubenswrapper[4949]: W0216 11:31:18.354553 4949 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/93df10b9-3ead-478c-9dd0-d7fbd3242ddb/volumes/kubernetes.io~secret/combined-ca-bundle Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.354709 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93df10b9-3ead-478c-9dd0-d7fbd3242ddb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "93df10b9-3ead-478c-9dd0-d7fbd3242ddb" (UID: "93df10b9-3ead-478c-9dd0-d7fbd3242ddb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.355687 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5-kube-api-access-4x9wd" (OuterVolumeSpecName: "kube-api-access-4x9wd") pod "8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5" (UID: "8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5"). InnerVolumeSpecName "kube-api-access-4x9wd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.357459 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5" (UID: "8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.380424 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-cff99c4df-6j2pt" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.392856 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5" (UID: "8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.452246 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.452281 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93df10b9-3ead-478c-9dd0-d7fbd3242ddb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.452291 4949 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5-config-data-custom\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.452301 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4x9wd\" (UniqueName: \"kubernetes.io/projected/8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5-kube-api-access-4x9wd\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.491066 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-59d764b56d-xgwkg" event={"ID":"8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5","Type":"ContainerDied","Data":"a41f266cf3c717a0322e98934ea17654d3373796d0c54550b81435f4e6bdd676"} Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.491131 4949 scope.go:117] "RemoveContainer" containerID="787115588af80eca11d98588f198014f1337e7320b4940c5371cd5cca6c98580" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.491359 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-59d764b56d-xgwkg" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.541883 4949 generic.go:334] "Generic (PLEG): container finished" podID="b7171cf3-1339-4609-bf59-f9a3777bb15c" containerID="2447ec3101884bb3a4a65d8a5bb4e65c0eea62a71a0577b852d152d4e8899067" exitCode=0 Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.543354 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-74f8bcc545-j46d5" event={"ID":"b7171cf3-1339-4609-bf59-f9a3777bb15c","Type":"ContainerDied","Data":"2447ec3101884bb3a4a65d8a5bb4e65c0eea62a71a0577b852d152d4e8899067"} Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.549320 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5-config-data" (OuterVolumeSpecName: "config-data") pod "8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5" (UID: "8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.560294 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5-config-data\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.562987 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-778c8b6f4c-g5qht" event={"ID":"93df10b9-3ead-478c-9dd0-d7fbd3242ddb","Type":"ContainerDied","Data":"71bd2467eab360d2ef498394c98059157dfee6721d05b142cdda3251aacd107d"} Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.563469 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-778c8b6f4c-g5qht" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.655465 4949 scope.go:117] "RemoveContainer" containerID="3b87fce00b2c7c023e7f41d779803404b3a9c8cd34ed274a57736eb479c3c072" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.769805 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-778c8b6f4c-g5qht"] Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.804725 4949 scope.go:117] "RemoveContainer" containerID="e346b0c500dffe767c26dd615243a95e3628b0906861d6aae148302a7ac533ad" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.809127 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-worker-778c8b6f4c-g5qht"] Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.877904 4949 scope.go:117] "RemoveContainer" containerID="ac8a17d19f086f2d7cae416be2d1b025d2c92c16a5ceea66e7b15e74dfe8955d" Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.913517 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-59d764b56d-xgwkg"] Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.926737 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-keystone-listener-59d764b56d-xgwkg"] Feb 16 11:31:18 crc kubenswrapper[4949]: I0216 11:31:18.995900 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-f2c9s"] Feb 16 11:31:19 crc kubenswrapper[4949]: E0216 11:31:19.021136 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ceilometer-0" 
podUID="c59b957e-c5f8-463f-8228-1051225f5140" Feb 16 11:31:19 crc kubenswrapper[4949]: I0216 11:31:19.284008 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5" path="/var/lib/kubelet/pods/8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5/volumes" Feb 16 11:31:19 crc kubenswrapper[4949]: I0216 11:31:19.307426 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="93df10b9-3ead-478c-9dd0-d7fbd3242ddb" path="/var/lib/kubelet/pods/93df10b9-3ead-478c-9dd0-d7fbd3242ddb/volumes" Feb 16 11:31:19 crc kubenswrapper[4949]: I0216 11:31:19.444955 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/neutron-74f8bcc545-j46d5" podUID="b7171cf3-1339-4609-bf59-f9a3777bb15c" containerName="neutron-httpd" probeResult="failure" output="Get \"https://10.217.0.195:9696/\": dial tcp 10.217.0.195:9696: connect: connection refused" Feb 16 11:31:19 crc kubenswrapper[4949]: I0216 11:31:19.523127 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Feb 16 11:31:19 crc kubenswrapper[4949]: I0216 11:31:19.619184 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Feb 16 11:31:19 crc kubenswrapper[4949]: I0216 11:31:19.662998 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"9dd500a3-a7c1-4d0a-9c63-a2855b645f06","Type":"ContainerStarted","Data":"0bd7901e9d3fc4302684b4f2314fee3022afb74d66a8cdf3682e81035fcf59a2"} Feb 16 11:31:19 crc kubenswrapper[4949]: I0216 11:31:19.680777 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c59b957e-c5f8-463f-8228-1051225f5140","Type":"ContainerStarted","Data":"9637ba0f42a043237105a683774ab07d6abf8daaa30e847aca569d8a8556d888"} Feb 16 11:31:19 crc kubenswrapper[4949]: I0216 11:31:19.681372 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c59b957e-c5f8-463f-8228-1051225f5140" containerName="ceilometer-notification-agent" containerID="cri-o://8eaa87aadd7b317f610cc7d3050e7b6dc48d4248736b9228780b0c0df54af339" gracePeriod=30 Feb 16 11:31:19 crc kubenswrapper[4949]: I0216 11:31:19.681643 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Feb 16 11:31:19 crc kubenswrapper[4949]: I0216 11:31:19.682051 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c59b957e-c5f8-463f-8228-1051225f5140" containerName="proxy-httpd" containerID="cri-o://9637ba0f42a043237105a683774ab07d6abf8daaa30e847aca569d8a8556d888" gracePeriod=30 Feb 16 11:31:19 crc kubenswrapper[4949]: I0216 11:31:19.682107 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c59b957e-c5f8-463f-8228-1051225f5140" containerName="sg-core" containerID="cri-o://e12f9ac9cc1163e7fbc665fa1788a710a8045ef5a8e189c7103ed2fe5d1a1a79" gracePeriod=30 Feb 16 11:31:19 crc kubenswrapper[4949]: I0216 11:31:19.704236 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5784cf869f-f2c9s" event={"ID":"2345d1ab-b1c1-4959-a5ac-5da97eda36e3","Type":"ContainerStarted","Data":"c5c568ce011540a2f01f66505ed694652acb79a94ce6b1e34edf3205b0512c34"} Feb 16 11:31:19 crc kubenswrapper[4949]: I0216 11:31:19.819773 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-cff99c4df-6j2pt"] Feb 16 11:31:20 crc kubenswrapper[4949]: I0216 11:31:20.616617 4949 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Feb 16 11:31:20 crc kubenswrapper[4949]: I0216 11:31:20.724920 4949 generic.go:334] "Generic (PLEG): container finished" podID="c59b957e-c5f8-463f-8228-1051225f5140" containerID="9637ba0f42a043237105a683774ab07d6abf8daaa30e847aca569d8a8556d888" exitCode=0 Feb 16 11:31:20 crc kubenswrapper[4949]: I0216 11:31:20.724949 4949 generic.go:334] "Generic (PLEG): container finished" podID="c59b957e-c5f8-463f-8228-1051225f5140" containerID="e12f9ac9cc1163e7fbc665fa1788a710a8045ef5a8e189c7103ed2fe5d1a1a79" exitCode=2 Feb 16 11:31:20 crc kubenswrapper[4949]: I0216 11:31:20.724986 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c59b957e-c5f8-463f-8228-1051225f5140","Type":"ContainerDied","Data":"9637ba0f42a043237105a683774ab07d6abf8daaa30e847aca569d8a8556d888"} Feb 16 11:31:20 crc kubenswrapper[4949]: I0216 11:31:20.725013 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c59b957e-c5f8-463f-8228-1051225f5140","Type":"ContainerDied","Data":"e12f9ac9cc1163e7fbc665fa1788a710a8045ef5a8e189c7103ed2fe5d1a1a79"} Feb 16 11:31:20 crc kubenswrapper[4949]: I0216 11:31:20.730671 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-86cc5fd6cd-qvlrg" podUID="52108945-9ec3-4a39-9f9b-e6a79ea4adc7" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.201:9311/healthcheck\": read tcp 10.217.0.2:36006->10.217.0.201:9311: read: connection reset by peer" Feb 16 11:31:20 crc kubenswrapper[4949]: I0216 11:31:20.730681 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-86cc5fd6cd-qvlrg" podUID="52108945-9ec3-4a39-9f9b-e6a79ea4adc7" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.201:9311/healthcheck\": read tcp 10.217.0.2:36000->10.217.0.201:9311: read: connection reset by peer" Feb 16 11:31:20 crc kubenswrapper[4949]: I0216 11:31:20.731623 4949 generic.go:334] "Generic (PLEG): container finished" podID="2345d1ab-b1c1-4959-a5ac-5da97eda36e3" containerID="fd38d618837ad072ec0a2b8c4d1adf6501103e4920b337bef23531dea20399c2" exitCode=0 Feb 16 11:31:20 crc kubenswrapper[4949]: I0216 11:31:20.731701 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5784cf869f-f2c9s" event={"ID":"2345d1ab-b1c1-4959-a5ac-5da97eda36e3","Type":"ContainerDied","Data":"fd38d618837ad072ec0a2b8c4d1adf6501103e4920b337bef23531dea20399c2"} Feb 16 11:31:20 crc kubenswrapper[4949]: I0216 11:31:20.735519 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"de507279-e660-4141-becd-e2e55408a30d","Type":"ContainerStarted","Data":"a0beba34065de8d9bcb9d2f59cfe695e1b08fadc1cab388d3f938c9f901330ed"} Feb 16 11:31:20 crc kubenswrapper[4949]: I0216 11:31:20.738443 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-cff99c4df-6j2pt" event={"ID":"9dcea626-54bc-4dba-a5d5-6df79c77216a","Type":"ContainerStarted","Data":"113c4afa0474e39d80943196b50bdbc928d8d2b8a2177a21122116e08d8ea486"} Feb 16 11:31:20 crc kubenswrapper[4949]: I0216 11:31:20.738486 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-cff99c4df-6j2pt" event={"ID":"9dcea626-54bc-4dba-a5d5-6df79c77216a","Type":"ContainerStarted","Data":"17d5f4bc17447510bd18e44d5681e122091ed27333edfa7f6f43d8789c3c352b"} Feb 16 11:31:21 crc kubenswrapper[4949]: I0216 11:31:21.498736 4949 util.go:48] "No 
Feb 16 11:31:21 crc kubenswrapper[4949]: I0216 11:31:21.628716 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52108945-9ec3-4a39-9f9b-e6a79ea4adc7-combined-ca-bundle\") pod \"52108945-9ec3-4a39-9f9b-e6a79ea4adc7\" (UID: \"52108945-9ec3-4a39-9f9b-e6a79ea4adc7\") "
Feb 16 11:31:21 crc kubenswrapper[4949]: I0216 11:31:21.628778 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6k489\" (UniqueName: \"kubernetes.io/projected/52108945-9ec3-4a39-9f9b-e6a79ea4adc7-kube-api-access-6k489\") pod \"52108945-9ec3-4a39-9f9b-e6a79ea4adc7\" (UID: \"52108945-9ec3-4a39-9f9b-e6a79ea4adc7\") "
Feb 16 11:31:21 crc kubenswrapper[4949]: I0216 11:31:21.628987 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/52108945-9ec3-4a39-9f9b-e6a79ea4adc7-logs\") pod \"52108945-9ec3-4a39-9f9b-e6a79ea4adc7\" (UID: \"52108945-9ec3-4a39-9f9b-e6a79ea4adc7\") "
Feb 16 11:31:21 crc kubenswrapper[4949]: I0216 11:31:21.629055 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52108945-9ec3-4a39-9f9b-e6a79ea4adc7-config-data\") pod \"52108945-9ec3-4a39-9f9b-e6a79ea4adc7\" (UID: \"52108945-9ec3-4a39-9f9b-e6a79ea4adc7\") "
Feb 16 11:31:21 crc kubenswrapper[4949]: I0216 11:31:21.629082 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/52108945-9ec3-4a39-9f9b-e6a79ea4adc7-config-data-custom\") pod \"52108945-9ec3-4a39-9f9b-e6a79ea4adc7\" (UID: \"52108945-9ec3-4a39-9f9b-e6a79ea4adc7\") "
Feb 16 11:31:21 crc kubenswrapper[4949]: I0216 11:31:21.632422 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/52108945-9ec3-4a39-9f9b-e6a79ea4adc7-logs" (OuterVolumeSpecName: "logs") pod "52108945-9ec3-4a39-9f9b-e6a79ea4adc7" (UID: "52108945-9ec3-4a39-9f9b-e6a79ea4adc7"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Feb 16 11:31:21 crc kubenswrapper[4949]: I0216 11:31:21.643340 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52108945-9ec3-4a39-9f9b-e6a79ea4adc7-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "52108945-9ec3-4a39-9f9b-e6a79ea4adc7" (UID: "52108945-9ec3-4a39-9f9b-e6a79ea4adc7"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Feb 16 11:31:21 crc kubenswrapper[4949]: I0216 11:31:21.643564 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/52108945-9ec3-4a39-9f9b-e6a79ea4adc7-kube-api-access-6k489" (OuterVolumeSpecName: "kube-api-access-6k489") pod "52108945-9ec3-4a39-9f9b-e6a79ea4adc7" (UID: "52108945-9ec3-4a39-9f9b-e6a79ea4adc7"). InnerVolumeSpecName "kube-api-access-6k489". PluginName "kubernetes.io/projected", VolumeGidValue ""
Feb 16 11:31:21 crc kubenswrapper[4949]: I0216 11:31:21.695345 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52108945-9ec3-4a39-9f9b-e6a79ea4adc7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "52108945-9ec3-4a39-9f9b-e6a79ea4adc7" (UID: "52108945-9ec3-4a39-9f9b-e6a79ea4adc7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Feb 16 11:31:21 crc kubenswrapper[4949]: I0216 11:31:21.733768 4949 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/52108945-9ec3-4a39-9f9b-e6a79ea4adc7-logs\") on node \"crc\" DevicePath \"\""
Feb 16 11:31:21 crc kubenswrapper[4949]: I0216 11:31:21.733808 4949 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/52108945-9ec3-4a39-9f9b-e6a79ea4adc7-config-data-custom\") on node \"crc\" DevicePath \"\""
Feb 16 11:31:21 crc kubenswrapper[4949]: I0216 11:31:21.733824 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52108945-9ec3-4a39-9f9b-e6a79ea4adc7-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Feb 16 11:31:21 crc kubenswrapper[4949]: I0216 11:31:21.733832 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6k489\" (UniqueName: \"kubernetes.io/projected/52108945-9ec3-4a39-9f9b-e6a79ea4adc7-kube-api-access-6k489\") on node \"crc\" DevicePath \"\""
Feb 16 11:31:21 crc kubenswrapper[4949]: I0216 11:31:21.766967 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52108945-9ec3-4a39-9f9b-e6a79ea4adc7-config-data" (OuterVolumeSpecName: "config-data") pod "52108945-9ec3-4a39-9f9b-e6a79ea4adc7" (UID: "52108945-9ec3-4a39-9f9b-e6a79ea4adc7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Feb 16 11:31:21 crc kubenswrapper[4949]: I0216 11:31:21.785896 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-cff99c4df-6j2pt" event={"ID":"9dcea626-54bc-4dba-a5d5-6df79c77216a","Type":"ContainerStarted","Data":"e16237c9c458261a391848cab1bad60d7d820716e4e750e3efb3de7f9b183779"}
Feb 16 11:31:21 crc kubenswrapper[4949]: I0216 11:31:21.786007 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-cff99c4df-6j2pt"
Feb 16 11:31:21 crc kubenswrapper[4949]: I0216 11:31:21.795523 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"9dd500a3-a7c1-4d0a-9c63-a2855b645f06","Type":"ContainerStarted","Data":"600d9f66e5e657be1699c60f86c8434f3c13d8651b4750b274dcc6bfc3787aa5"}
Feb 16 11:31:21 crc kubenswrapper[4949]: I0216 11:31:21.815488 4949 generic.go:334] "Generic (PLEG): container finished" podID="52108945-9ec3-4a39-9f9b-e6a79ea4adc7" containerID="d89822835d4f34c6207defb6ae5a2a3e0ed5115d6ebc11b0a6557517faf6ef24" exitCode=0
Feb 16 11:31:21 crc kubenswrapper[4949]: I0216 11:31:21.815623 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-86cc5fd6cd-qvlrg" event={"ID":"52108945-9ec3-4a39-9f9b-e6a79ea4adc7","Type":"ContainerDied","Data":"d89822835d4f34c6207defb6ae5a2a3e0ed5115d6ebc11b0a6557517faf6ef24"}
Feb 16 11:31:21 crc kubenswrapper[4949]: I0216 11:31:21.815666 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-86cc5fd6cd-qvlrg" event={"ID":"52108945-9ec3-4a39-9f9b-e6a79ea4adc7","Type":"ContainerDied","Data":"fd06c83eb197066eb5018f4aa8d20bab12b4fd67ff3c70af35b32b664c1bf679"}
Feb 16 11:31:21 crc kubenswrapper[4949]: I0216 11:31:21.815688 4949 scope.go:117] "RemoveContainer" containerID="d89822835d4f34c6207defb6ae5a2a3e0ed5115d6ebc11b0a6557517faf6ef24"
Feb 16 11:31:21 crc kubenswrapper[4949]: I0216 11:31:21.815910 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-86cc5fd6cd-qvlrg"
Feb 16 11:31:21 crc kubenswrapper[4949]: I0216 11:31:21.829635 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5784cf869f-f2c9s" event={"ID":"2345d1ab-b1c1-4959-a5ac-5da97eda36e3","Type":"ContainerStarted","Data":"1f0446c6bed40a5d7d999c32d0f5d94b7d2ae5ff811cb354123c471f37a736cc"}
Feb 16 11:31:21 crc kubenswrapper[4949]: I0216 11:31:21.830437 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5784cf869f-f2c9s"
Feb 16 11:31:21 crc kubenswrapper[4949]: I0216 11:31:21.838715 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52108945-9ec3-4a39-9f9b-e6a79ea4adc7-config-data\") on node \"crc\" DevicePath \"\""
Feb 16 11:31:21 crc kubenswrapper[4949]: I0216 11:31:21.845471 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"de507279-e660-4141-becd-e2e55408a30d","Type":"ContainerStarted","Data":"756fc3585e568ab563e651983eafc8e5cbe8aa0de6a5256452fba4485c28ffba"}
Feb 16 11:31:21 crc kubenswrapper[4949]: I0216 11:31:21.903479 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-cff99c4df-6j2pt" podStartSLOduration=4.9034549389999995 podStartE2EDuration="4.903454939s" podCreationTimestamp="2026-02-16 11:31:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:31:21.864078535 +0000 UTC m=+1471.493412720" watchObservedRunningTime="2026-02-16 11:31:21.903454939 +0000 UTC m=+1471.532789104"
Feb 16 11:31:21 crc kubenswrapper[4949]: I0216 11:31:21.910371 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5784cf869f-f2c9s" podStartSLOduration=4.910346756 podStartE2EDuration="4.910346756s" podCreationTimestamp="2026-02-16 11:31:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:31:21.899160177 +0000 UTC m=+1471.528494352" watchObservedRunningTime="2026-02-16 11:31:21.910346756 +0000 UTC m=+1471.539680921"
Feb 16 11:31:21 crc kubenswrapper[4949]: I0216 11:31:21.956114 4949 scope.go:117] "RemoveContainer" containerID="55a092b2cea8fc5e598040cf4e4f820a46c707bfb22ce7c716d47bc1eeae98d7"
Feb 16 11:31:21 crc kubenswrapper[4949]: I0216 11:31:21.966077 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-86cc5fd6cd-qvlrg"]
Feb 16 11:31:21 crc kubenswrapper[4949]: I0216 11:31:21.982902 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-86cc5fd6cd-qvlrg"]
Feb 16 11:31:22 crc kubenswrapper[4949]: I0216 11:31:22.017600 4949 scope.go:117] "RemoveContainer" containerID="d89822835d4f34c6207defb6ae5a2a3e0ed5115d6ebc11b0a6557517faf6ef24"
Feb 16 11:31:22 crc kubenswrapper[4949]: E0216 11:31:22.021063 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d89822835d4f34c6207defb6ae5a2a3e0ed5115d6ebc11b0a6557517faf6ef24\": container with ID starting with d89822835d4f34c6207defb6ae5a2a3e0ed5115d6ebc11b0a6557517faf6ef24 not found: ID does not exist" containerID="d89822835d4f34c6207defb6ae5a2a3e0ed5115d6ebc11b0a6557517faf6ef24"
Feb 16 11:31:22 crc kubenswrapper[4949]: I0216 11:31:22.021115 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d89822835d4f34c6207defb6ae5a2a3e0ed5115d6ebc11b0a6557517faf6ef24"} err="failed to get container status \"d89822835d4f34c6207defb6ae5a2a3e0ed5115d6ebc11b0a6557517faf6ef24\": rpc error: code = NotFound desc = could not find container \"d89822835d4f34c6207defb6ae5a2a3e0ed5115d6ebc11b0a6557517faf6ef24\": container with ID starting with d89822835d4f34c6207defb6ae5a2a3e0ed5115d6ebc11b0a6557517faf6ef24 not found: ID does not exist"
error" containerID={"Type":"cri-o","ID":"d89822835d4f34c6207defb6ae5a2a3e0ed5115d6ebc11b0a6557517faf6ef24"} err="failed to get container status \"d89822835d4f34c6207defb6ae5a2a3e0ed5115d6ebc11b0a6557517faf6ef24\": rpc error: code = NotFound desc = could not find container \"d89822835d4f34c6207defb6ae5a2a3e0ed5115d6ebc11b0a6557517faf6ef24\": container with ID starting with d89822835d4f34c6207defb6ae5a2a3e0ed5115d6ebc11b0a6557517faf6ef24 not found: ID does not exist" Feb 16 11:31:22 crc kubenswrapper[4949]: I0216 11:31:22.021151 4949 scope.go:117] "RemoveContainer" containerID="55a092b2cea8fc5e598040cf4e4f820a46c707bfb22ce7c716d47bc1eeae98d7" Feb 16 11:31:22 crc kubenswrapper[4949]: E0216 11:31:22.021753 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"55a092b2cea8fc5e598040cf4e4f820a46c707bfb22ce7c716d47bc1eeae98d7\": container with ID starting with 55a092b2cea8fc5e598040cf4e4f820a46c707bfb22ce7c716d47bc1eeae98d7 not found: ID does not exist" containerID="55a092b2cea8fc5e598040cf4e4f820a46c707bfb22ce7c716d47bc1eeae98d7" Feb 16 11:31:22 crc kubenswrapper[4949]: I0216 11:31:22.021781 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"55a092b2cea8fc5e598040cf4e4f820a46c707bfb22ce7c716d47bc1eeae98d7"} err="failed to get container status \"55a092b2cea8fc5e598040cf4e4f820a46c707bfb22ce7c716d47bc1eeae98d7\": rpc error: code = NotFound desc = could not find container \"55a092b2cea8fc5e598040cf4e4f820a46c707bfb22ce7c716d47bc1eeae98d7\": container with ID starting with 55a092b2cea8fc5e598040cf4e4f820a46c707bfb22ce7c716d47bc1eeae98d7 not found: ID does not exist" Feb 16 11:31:22 crc kubenswrapper[4949]: I0216 11:31:22.871344 4949 generic.go:334] "Generic (PLEG): container finished" podID="b7171cf3-1339-4609-bf59-f9a3777bb15c" containerID="81542eacf05937369a012bc31d2f24fe8e1bbd60b859989367e99bfdf914d1a1" exitCode=0 Feb 16 11:31:22 crc kubenswrapper[4949]: I0216 11:31:22.871472 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-74f8bcc545-j46d5" event={"ID":"b7171cf3-1339-4609-bf59-f9a3777bb15c","Type":"ContainerDied","Data":"81542eacf05937369a012bc31d2f24fe8e1bbd60b859989367e99bfdf914d1a1"} Feb 16 11:31:22 crc kubenswrapper[4949]: I0216 11:31:22.879052 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"9dd500a3-a7c1-4d0a-9c63-a2855b645f06","Type":"ContainerStarted","Data":"8bfb3d97d25acef281596e76de0b7dc90f690e69f0beea9f9d8e96f8d66ae16f"} Feb 16 11:31:22 crc kubenswrapper[4949]: I0216 11:31:22.879268 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="9dd500a3-a7c1-4d0a-9c63-a2855b645f06" containerName="cinder-api-log" containerID="cri-o://600d9f66e5e657be1699c60f86c8434f3c13d8651b4750b274dcc6bfc3787aa5" gracePeriod=30 Feb 16 11:31:22 crc kubenswrapper[4949]: I0216 11:31:22.879351 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Feb 16 11:31:22 crc kubenswrapper[4949]: I0216 11:31:22.879461 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="9dd500a3-a7c1-4d0a-9c63-a2855b645f06" containerName="cinder-api" containerID="cri-o://8bfb3d97d25acef281596e76de0b7dc90f690e69f0beea9f9d8e96f8d66ae16f" gracePeriod=30 Feb 16 11:31:22 crc kubenswrapper[4949]: I0216 11:31:22.899426 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/cinder-scheduler-0" event={"ID":"de507279-e660-4141-becd-e2e55408a30d","Type":"ContainerStarted","Data":"bfd65f9609247151a0eb01af3eeed9d10a4452921548eb812d13f14db07b16bf"} Feb 16 11:31:22 crc kubenswrapper[4949]: I0216 11:31:22.906518 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=5.906483468 podStartE2EDuration="5.906483468s" podCreationTimestamp="2026-02-16 11:31:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:31:22.902392501 +0000 UTC m=+1472.531726666" watchObservedRunningTime="2026-02-16 11:31:22.906483468 +0000 UTC m=+1472.535817673" Feb 16 11:31:22 crc kubenswrapper[4949]: I0216 11:31:22.929212 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-g76dr" podUID="acc263a1-4f57-4dca-bcc5-5d5388539a5d" containerName="registry-server" probeResult="failure" output=< Feb 16 11:31:22 crc kubenswrapper[4949]: timeout: failed to connect service ":50051" within 1s Feb 16 11:31:22 crc kubenswrapper[4949]: > Feb 16 11:31:22 crc kubenswrapper[4949]: I0216 11:31:22.939515 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=4.979005886 podStartE2EDuration="5.93948166s" podCreationTimestamp="2026-02-16 11:31:17 +0000 UTC" firstStartedPulling="2026-02-16 11:31:19.661687175 +0000 UTC m=+1469.291021340" lastFinishedPulling="2026-02-16 11:31:20.622162949 +0000 UTC m=+1470.251497114" observedRunningTime="2026-02-16 11:31:22.922897726 +0000 UTC m=+1472.552231891" watchObservedRunningTime="2026-02-16 11:31:22.93948166 +0000 UTC m=+1472.568815825" Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.257887 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="52108945-9ec3-4a39-9f9b-e6a79ea4adc7" path="/var/lib/kubelet/pods/52108945-9ec3-4a39-9f9b-e6a79ea4adc7/volumes" Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.601409 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-74f8bcc545-j46d5" Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.648389 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.740712 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9dd500a3-a7c1-4d0a-9c63-a2855b645f06-config-data-custom\") pod \"9dd500a3-a7c1-4d0a-9c63-a2855b645f06\" (UID: \"9dd500a3-a7c1-4d0a-9c63-a2855b645f06\") " Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.740761 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9dd500a3-a7c1-4d0a-9c63-a2855b645f06-etc-machine-id\") pod \"9dd500a3-a7c1-4d0a-9c63-a2855b645f06\" (UID: \"9dd500a3-a7c1-4d0a-9c63-a2855b645f06\") " Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.740795 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9dd500a3-a7c1-4d0a-9c63-a2855b645f06-combined-ca-bundle\") pod \"9dd500a3-a7c1-4d0a-9c63-a2855b645f06\" (UID: \"9dd500a3-a7c1-4d0a-9c63-a2855b645f06\") " Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.740836 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gmshd\" (UniqueName: \"kubernetes.io/projected/9dd500a3-a7c1-4d0a-9c63-a2855b645f06-kube-api-access-gmshd\") pod \"9dd500a3-a7c1-4d0a-9c63-a2855b645f06\" (UID: \"9dd500a3-a7c1-4d0a-9c63-a2855b645f06\") " Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.740911 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9dd500a3-a7c1-4d0a-9c63-a2855b645f06-config-data\") pod \"9dd500a3-a7c1-4d0a-9c63-a2855b645f06\" (UID: \"9dd500a3-a7c1-4d0a-9c63-a2855b645f06\") " Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.741599 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9dd500a3-a7c1-4d0a-9c63-a2855b645f06-logs\") pod \"9dd500a3-a7c1-4d0a-9c63-a2855b645f06\" (UID: \"9dd500a3-a7c1-4d0a-9c63-a2855b645f06\") " Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.741631 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/b7171cf3-1339-4609-bf59-f9a3777bb15c-config\") pod \"b7171cf3-1339-4609-bf59-f9a3777bb15c\" (UID: \"b7171cf3-1339-4609-bf59-f9a3777bb15c\") " Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.741745 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b7171cf3-1339-4609-bf59-f9a3777bb15c-internal-tls-certs\") pod \"b7171cf3-1339-4609-bf59-f9a3777bb15c\" (UID: \"b7171cf3-1339-4609-bf59-f9a3777bb15c\") " Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.741795 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b7171cf3-1339-4609-bf59-f9a3777bb15c-httpd-config\") pod \"b7171cf3-1339-4609-bf59-f9a3777bb15c\" (UID: \"b7171cf3-1339-4609-bf59-f9a3777bb15c\") " Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.741820 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9dd500a3-a7c1-4d0a-9c63-a2855b645f06-scripts\") pod \"9dd500a3-a7c1-4d0a-9c63-a2855b645f06\" (UID: \"9dd500a3-a7c1-4d0a-9c63-a2855b645f06\") " Feb 
16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.741837 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7nfp9\" (UniqueName: \"kubernetes.io/projected/b7171cf3-1339-4609-bf59-f9a3777bb15c-kube-api-access-7nfp9\") pod \"b7171cf3-1339-4609-bf59-f9a3777bb15c\" (UID: \"b7171cf3-1339-4609-bf59-f9a3777bb15c\") " Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.741859 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7171cf3-1339-4609-bf59-f9a3777bb15c-combined-ca-bundle\") pod \"b7171cf3-1339-4609-bf59-f9a3777bb15c\" (UID: \"b7171cf3-1339-4609-bf59-f9a3777bb15c\") " Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.741885 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b7171cf3-1339-4609-bf59-f9a3777bb15c-public-tls-certs\") pod \"b7171cf3-1339-4609-bf59-f9a3777bb15c\" (UID: \"b7171cf3-1339-4609-bf59-f9a3777bb15c\") " Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.741943 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b7171cf3-1339-4609-bf59-f9a3777bb15c-ovndb-tls-certs\") pod \"b7171cf3-1339-4609-bf59-f9a3777bb15c\" (UID: \"b7171cf3-1339-4609-bf59-f9a3777bb15c\") " Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.749291 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9dd500a3-a7c1-4d0a-9c63-a2855b645f06-kube-api-access-gmshd" (OuterVolumeSpecName: "kube-api-access-gmshd") pod "9dd500a3-a7c1-4d0a-9c63-a2855b645f06" (UID: "9dd500a3-a7c1-4d0a-9c63-a2855b645f06"). InnerVolumeSpecName "kube-api-access-gmshd". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.749505 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9dd500a3-a7c1-4d0a-9c63-a2855b645f06-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "9dd500a3-a7c1-4d0a-9c63-a2855b645f06" (UID: "9dd500a3-a7c1-4d0a-9c63-a2855b645f06"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.754895 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7171cf3-1339-4609-bf59-f9a3777bb15c-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "b7171cf3-1339-4609-bf59-f9a3777bb15c" (UID: "b7171cf3-1339-4609-bf59-f9a3777bb15c"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.756066 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9dd500a3-a7c1-4d0a-9c63-a2855b645f06-logs" (OuterVolumeSpecName: "logs") pod "9dd500a3-a7c1-4d0a-9c63-a2855b645f06" (UID: "9dd500a3-a7c1-4d0a-9c63-a2855b645f06"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.757039 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9dd500a3-a7c1-4d0a-9c63-a2855b645f06-scripts" (OuterVolumeSpecName: "scripts") pod "9dd500a3-a7c1-4d0a-9c63-a2855b645f06" (UID: "9dd500a3-a7c1-4d0a-9c63-a2855b645f06"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.777295 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9dd500a3-a7c1-4d0a-9c63-a2855b645f06-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "9dd500a3-a7c1-4d0a-9c63-a2855b645f06" (UID: "9dd500a3-a7c1-4d0a-9c63-a2855b645f06"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.780446 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b7171cf3-1339-4609-bf59-f9a3777bb15c-kube-api-access-7nfp9" (OuterVolumeSpecName: "kube-api-access-7nfp9") pod "b7171cf3-1339-4609-bf59-f9a3777bb15c" (UID: "b7171cf3-1339-4609-bf59-f9a3777bb15c"). InnerVolumeSpecName "kube-api-access-7nfp9". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.822856 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9dd500a3-a7c1-4d0a-9c63-a2855b645f06-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9dd500a3-a7c1-4d0a-9c63-a2855b645f06" (UID: "9dd500a3-a7c1-4d0a-9c63-a2855b645f06"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.845636 4949 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b7171cf3-1339-4609-bf59-f9a3777bb15c-httpd-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.845666 4949 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9dd500a3-a7c1-4d0a-9c63-a2855b645f06-scripts\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.845677 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7nfp9\" (UniqueName: \"kubernetes.io/projected/b7171cf3-1339-4609-bf59-f9a3777bb15c-kube-api-access-7nfp9\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.845688 4949 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9dd500a3-a7c1-4d0a-9c63-a2855b645f06-config-data-custom\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.845696 4949 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9dd500a3-a7c1-4d0a-9c63-a2855b645f06-etc-machine-id\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.845703 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9dd500a3-a7c1-4d0a-9c63-a2855b645f06-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.845712 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gmshd\" (UniqueName: \"kubernetes.io/projected/9dd500a3-a7c1-4d0a-9c63-a2855b645f06-kube-api-access-gmshd\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.845723 4949 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9dd500a3-a7c1-4d0a-9c63-a2855b645f06-logs\") on node \"crc\" DevicePath \"\"" Feb 
16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.863464 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9dd500a3-a7c1-4d0a-9c63-a2855b645f06-config-data" (OuterVolumeSpecName: "config-data") pod "9dd500a3-a7c1-4d0a-9c63-a2855b645f06" (UID: "9dd500a3-a7c1-4d0a-9c63-a2855b645f06"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.905336 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7171cf3-1339-4609-bf59-f9a3777bb15c-config" (OuterVolumeSpecName: "config") pod "b7171cf3-1339-4609-bf59-f9a3777bb15c" (UID: "b7171cf3-1339-4609-bf59-f9a3777bb15c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.906075 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7171cf3-1339-4609-bf59-f9a3777bb15c-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "b7171cf3-1339-4609-bf59-f9a3777bb15c" (UID: "b7171cf3-1339-4609-bf59-f9a3777bb15c"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.916354 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7171cf3-1339-4609-bf59-f9a3777bb15c-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "b7171cf3-1339-4609-bf59-f9a3777bb15c" (UID: "b7171cf3-1339-4609-bf59-f9a3777bb15c"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.927787 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-74f8bcc545-j46d5" event={"ID":"b7171cf3-1339-4609-bf59-f9a3777bb15c","Type":"ContainerDied","Data":"7e22823c267d9ec32b234f7b36ee28fd37b5609b771c357a521c731185076f05"} Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.927852 4949 scope.go:117] "RemoveContainer" containerID="2447ec3101884bb3a4a65d8a5bb4e65c0eea62a71a0577b852d152d4e8899067" Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.928088 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-74f8bcc545-j46d5" Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.940658 4949 generic.go:334] "Generic (PLEG): container finished" podID="9dd500a3-a7c1-4d0a-9c63-a2855b645f06" containerID="8bfb3d97d25acef281596e76de0b7dc90f690e69f0beea9f9d8e96f8d66ae16f" exitCode=0 Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.940693 4949 generic.go:334] "Generic (PLEG): container finished" podID="9dd500a3-a7c1-4d0a-9c63-a2855b645f06" containerID="600d9f66e5e657be1699c60f86c8434f3c13d8651b4750b274dcc6bfc3787aa5" exitCode=143 Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.940741 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.940752 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"9dd500a3-a7c1-4d0a-9c63-a2855b645f06","Type":"ContainerDied","Data":"8bfb3d97d25acef281596e76de0b7dc90f690e69f0beea9f9d8e96f8d66ae16f"} Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.940785 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"9dd500a3-a7c1-4d0a-9c63-a2855b645f06","Type":"ContainerDied","Data":"600d9f66e5e657be1699c60f86c8434f3c13d8651b4750b274dcc6bfc3787aa5"} Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.940796 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"9dd500a3-a7c1-4d0a-9c63-a2855b645f06","Type":"ContainerDied","Data":"0bd7901e9d3fc4302684b4f2314fee3022afb74d66a8cdf3682e81035fcf59a2"} Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.947142 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7171cf3-1339-4609-bf59-f9a3777bb15c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b7171cf3-1339-4609-bf59-f9a3777bb15c" (UID: "b7171cf3-1339-4609-bf59-f9a3777bb15c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.948463 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9dd500a3-a7c1-4d0a-9c63-a2855b645f06-config-data\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.948489 4949 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/b7171cf3-1339-4609-bf59-f9a3777bb15c-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.948500 4949 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b7171cf3-1339-4609-bf59-f9a3777bb15c-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.948511 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7171cf3-1339-4609-bf59-f9a3777bb15c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.948520 4949 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b7171cf3-1339-4609-bf59-f9a3777bb15c-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.951637 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7171cf3-1339-4609-bf59-f9a3777bb15c-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "b7171cf3-1339-4609-bf59-f9a3777bb15c" (UID: "b7171cf3-1339-4609-bf59-f9a3777bb15c"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.952740 4949 generic.go:334] "Generic (PLEG): container finished" podID="c59b957e-c5f8-463f-8228-1051225f5140" containerID="8eaa87aadd7b317f610cc7d3050e7b6dc48d4248736b9228780b0c0df54af339" exitCode=0 Feb 16 11:31:23 crc kubenswrapper[4949]: I0216 11:31:23.954154 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c59b957e-c5f8-463f-8228-1051225f5140","Type":"ContainerDied","Data":"8eaa87aadd7b317f610cc7d3050e7b6dc48d4248736b9228780b0c0df54af339"} Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.050898 4949 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b7171cf3-1339-4609-bf59-f9a3777bb15c-public-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.072225 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.080122 4949 scope.go:117] "RemoveContainer" containerID="81542eacf05937369a012bc31d2f24fe8e1bbd60b859989367e99bfdf914d1a1" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.093097 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.121556 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.123143 4949 scope.go:117] "RemoveContainer" containerID="8bfb3d97d25acef281596e76de0b7dc90f690e69f0beea9f9d8e96f8d66ae16f" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.153201 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c59b957e-c5f8-463f-8228-1051225f5140-combined-ca-bundle\") pod \"c59b957e-c5f8-463f-8228-1051225f5140\" (UID: \"c59b957e-c5f8-463f-8228-1051225f5140\") " Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.153320 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c59b957e-c5f8-463f-8228-1051225f5140-log-httpd\") pod \"c59b957e-c5f8-463f-8228-1051225f5140\" (UID: \"c59b957e-c5f8-463f-8228-1051225f5140\") " Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.153429 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c59b957e-c5f8-463f-8228-1051225f5140-run-httpd\") pod \"c59b957e-c5f8-463f-8228-1051225f5140\" (UID: \"c59b957e-c5f8-463f-8228-1051225f5140\") " Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.153490 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c59b957e-c5f8-463f-8228-1051225f5140-config-data\") pod \"c59b957e-c5f8-463f-8228-1051225f5140\" (UID: \"c59b957e-c5f8-463f-8228-1051225f5140\") " Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.153558 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c59b957e-c5f8-463f-8228-1051225f5140-scripts\") pod \"c59b957e-c5f8-463f-8228-1051225f5140\" (UID: \"c59b957e-c5f8-463f-8228-1051225f5140\") " Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.153588 4949 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c59b957e-c5f8-463f-8228-1051225f5140-sg-core-conf-yaml\") pod \"c59b957e-c5f8-463f-8228-1051225f5140\" (UID: \"c59b957e-c5f8-463f-8228-1051225f5140\") " Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.153672 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2v464\" (UniqueName: \"kubernetes.io/projected/c59b957e-c5f8-463f-8228-1051225f5140-kube-api-access-2v464\") pod \"c59b957e-c5f8-463f-8228-1051225f5140\" (UID: \"c59b957e-c5f8-463f-8228-1051225f5140\") " Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.155843 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c59b957e-c5f8-463f-8228-1051225f5140-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "c59b957e-c5f8-463f-8228-1051225f5140" (UID: "c59b957e-c5f8-463f-8228-1051225f5140"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.158748 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c59b957e-c5f8-463f-8228-1051225f5140-kube-api-access-2v464" (OuterVolumeSpecName: "kube-api-access-2v464") pod "c59b957e-c5f8-463f-8228-1051225f5140" (UID: "c59b957e-c5f8-463f-8228-1051225f5140"). InnerVolumeSpecName "kube-api-access-2v464". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.162574 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c59b957e-c5f8-463f-8228-1051225f5140-scripts" (OuterVolumeSpecName: "scripts") pod "c59b957e-c5f8-463f-8228-1051225f5140" (UID: "c59b957e-c5f8-463f-8228-1051225f5140"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.162984 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c59b957e-c5f8-463f-8228-1051225f5140-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "c59b957e-c5f8-463f-8228-1051225f5140" (UID: "c59b957e-c5f8-463f-8228-1051225f5140"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.180491 4949 scope.go:117] "RemoveContainer" containerID="600d9f66e5e657be1699c60f86c8434f3c13d8651b4750b274dcc6bfc3787aa5" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.193467 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c59b957e-c5f8-463f-8228-1051225f5140-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "c59b957e-c5f8-463f-8228-1051225f5140" (UID: "c59b957e-c5f8-463f-8228-1051225f5140"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.193544 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Feb 16 11:31:24 crc kubenswrapper[4949]: E0216 11:31:24.194125 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52108945-9ec3-4a39-9f9b-e6a79ea4adc7" containerName="barbican-api" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.194147 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="52108945-9ec3-4a39-9f9b-e6a79ea4adc7" containerName="barbican-api" Feb 16 11:31:24 crc kubenswrapper[4949]: E0216 11:31:24.194163 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7171cf3-1339-4609-bf59-f9a3777bb15c" containerName="neutron-api" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.194183 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7171cf3-1339-4609-bf59-f9a3777bb15c" containerName="neutron-api" Feb 16 11:31:24 crc kubenswrapper[4949]: E0216 11:31:24.194193 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93df10b9-3ead-478c-9dd0-d7fbd3242ddb" containerName="barbican-worker-log" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.194200 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="93df10b9-3ead-478c-9dd0-d7fbd3242ddb" containerName="barbican-worker-log" Feb 16 11:31:24 crc kubenswrapper[4949]: E0216 11:31:24.194219 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93df10b9-3ead-478c-9dd0-d7fbd3242ddb" containerName="barbican-worker" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.194227 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="93df10b9-3ead-478c-9dd0-d7fbd3242ddb" containerName="barbican-worker" Feb 16 11:31:24 crc kubenswrapper[4949]: E0216 11:31:24.194241 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c59b957e-c5f8-463f-8228-1051225f5140" containerName="sg-core" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.194248 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="c59b957e-c5f8-463f-8228-1051225f5140" containerName="sg-core" Feb 16 11:31:24 crc kubenswrapper[4949]: E0216 11:31:24.194257 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9dd500a3-a7c1-4d0a-9c63-a2855b645f06" containerName="cinder-api" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.194262 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="9dd500a3-a7c1-4d0a-9c63-a2855b645f06" containerName="cinder-api" Feb 16 11:31:24 crc kubenswrapper[4949]: E0216 11:31:24.194277 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5" containerName="barbican-keystone-listener-log" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.194283 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5" containerName="barbican-keystone-listener-log" Feb 16 11:31:24 crc kubenswrapper[4949]: E0216 11:31:24.194291 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9dd500a3-a7c1-4d0a-9c63-a2855b645f06" containerName="cinder-api-log" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.194297 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="9dd500a3-a7c1-4d0a-9c63-a2855b645f06" containerName="cinder-api-log" Feb 16 11:31:24 crc kubenswrapper[4949]: E0216 11:31:24.194308 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7171cf3-1339-4609-bf59-f9a3777bb15c" 
containerName="neutron-httpd" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.194314 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7171cf3-1339-4609-bf59-f9a3777bb15c" containerName="neutron-httpd" Feb 16 11:31:24 crc kubenswrapper[4949]: E0216 11:31:24.194328 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52108945-9ec3-4a39-9f9b-e6a79ea4adc7" containerName="barbican-api-log" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.194334 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="52108945-9ec3-4a39-9f9b-e6a79ea4adc7" containerName="barbican-api-log" Feb 16 11:31:24 crc kubenswrapper[4949]: E0216 11:31:24.194345 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c59b957e-c5f8-463f-8228-1051225f5140" containerName="proxy-httpd" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.194352 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="c59b957e-c5f8-463f-8228-1051225f5140" containerName="proxy-httpd" Feb 16 11:31:24 crc kubenswrapper[4949]: E0216 11:31:24.194367 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c59b957e-c5f8-463f-8228-1051225f5140" containerName="ceilometer-notification-agent" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.194373 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="c59b957e-c5f8-463f-8228-1051225f5140" containerName="ceilometer-notification-agent" Feb 16 11:31:24 crc kubenswrapper[4949]: E0216 11:31:24.194390 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5" containerName="barbican-keystone-listener" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.194396 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5" containerName="barbican-keystone-listener" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.194589 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="b7171cf3-1339-4609-bf59-f9a3777bb15c" containerName="neutron-httpd" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.194605 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="9dd500a3-a7c1-4d0a-9c63-a2855b645f06" containerName="cinder-api" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.194614 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="c59b957e-c5f8-463f-8228-1051225f5140" containerName="proxy-httpd" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.194624 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="c59b957e-c5f8-463f-8228-1051225f5140" containerName="ceilometer-notification-agent" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.194634 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="52108945-9ec3-4a39-9f9b-e6a79ea4adc7" containerName="barbican-api-log" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.194642 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5" containerName="barbican-keystone-listener" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.194652 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="b7171cf3-1339-4609-bf59-f9a3777bb15c" containerName="neutron-api" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.194662 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="8ca014b9-ddaa-458d-8cf1-6d1227ebcdc5" containerName="barbican-keystone-listener-log" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 
11:31:24.194676 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="93df10b9-3ead-478c-9dd0-d7fbd3242ddb" containerName="barbican-worker-log" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.194688 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="93df10b9-3ead-478c-9dd0-d7fbd3242ddb" containerName="barbican-worker" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.194700 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="c59b957e-c5f8-463f-8228-1051225f5140" containerName="sg-core" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.194715 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="52108945-9ec3-4a39-9f9b-e6a79ea4adc7" containerName="barbican-api" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.194723 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="9dd500a3-a7c1-4d0a-9c63-a2855b645f06" containerName="cinder-api-log" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.196298 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.199818 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.199943 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.200931 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.207559 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.254502 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c59b957e-c5f8-463f-8228-1051225f5140-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c59b957e-c5f8-463f-8228-1051225f5140" (UID: "c59b957e-c5f8-463f-8228-1051225f5140"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.257149 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9962c0a5-0f9e-4564-95d0-f128685c2473-public-tls-certs\") pod \"cinder-api-0\" (UID: \"9962c0a5-0f9e-4564-95d0-f128685c2473\") " pod="openstack/cinder-api-0" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.257223 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vndm4\" (UniqueName: \"kubernetes.io/projected/9962c0a5-0f9e-4564-95d0-f128685c2473-kube-api-access-vndm4\") pod \"cinder-api-0\" (UID: \"9962c0a5-0f9e-4564-95d0-f128685c2473\") " pod="openstack/cinder-api-0" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.257293 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9962c0a5-0f9e-4564-95d0-f128685c2473-etc-machine-id\") pod \"cinder-api-0\" (UID: \"9962c0a5-0f9e-4564-95d0-f128685c2473\") " pod="openstack/cinder-api-0" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.258330 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9962c0a5-0f9e-4564-95d0-f128685c2473-scripts\") pod \"cinder-api-0\" (UID: \"9962c0a5-0f9e-4564-95d0-f128685c2473\") " pod="openstack/cinder-api-0" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.258502 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9962c0a5-0f9e-4564-95d0-f128685c2473-config-data\") pod \"cinder-api-0\" (UID: \"9962c0a5-0f9e-4564-95d0-f128685c2473\") " pod="openstack/cinder-api-0" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.258634 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9962c0a5-0f9e-4564-95d0-f128685c2473-config-data-custom\") pod \"cinder-api-0\" (UID: \"9962c0a5-0f9e-4564-95d0-f128685c2473\") " pod="openstack/cinder-api-0" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.258816 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9962c0a5-0f9e-4564-95d0-f128685c2473-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"9962c0a5-0f9e-4564-95d0-f128685c2473\") " pod="openstack/cinder-api-0" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.258882 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9962c0a5-0f9e-4564-95d0-f128685c2473-logs\") pod \"cinder-api-0\" (UID: \"9962c0a5-0f9e-4564-95d0-f128685c2473\") " pod="openstack/cinder-api-0" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.259002 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9962c0a5-0f9e-4564-95d0-f128685c2473-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"9962c0a5-0f9e-4564-95d0-f128685c2473\") " pod="openstack/cinder-api-0" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.259192 4949 reconciler_common.go:293] "Volume detached for volume 
\"kube-api-access-2v464\" (UniqueName: \"kubernetes.io/projected/c59b957e-c5f8-463f-8228-1051225f5140-kube-api-access-2v464\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.259208 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c59b957e-c5f8-463f-8228-1051225f5140-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.259218 4949 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c59b957e-c5f8-463f-8228-1051225f5140-log-httpd\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.259228 4949 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c59b957e-c5f8-463f-8228-1051225f5140-run-httpd\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.259237 4949 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c59b957e-c5f8-463f-8228-1051225f5140-scripts\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.259247 4949 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c59b957e-c5f8-463f-8228-1051225f5140-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.281080 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c59b957e-c5f8-463f-8228-1051225f5140-config-data" (OuterVolumeSpecName: "config-data") pod "c59b957e-c5f8-463f-8228-1051225f5140" (UID: "c59b957e-c5f8-463f-8228-1051225f5140"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.310371 4949 scope.go:117] "RemoveContainer" containerID="8bfb3d97d25acef281596e76de0b7dc90f690e69f0beea9f9d8e96f8d66ae16f" Feb 16 11:31:24 crc kubenswrapper[4949]: E0216 11:31:24.310997 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8bfb3d97d25acef281596e76de0b7dc90f690e69f0beea9f9d8e96f8d66ae16f\": container with ID starting with 8bfb3d97d25acef281596e76de0b7dc90f690e69f0beea9f9d8e96f8d66ae16f not found: ID does not exist" containerID="8bfb3d97d25acef281596e76de0b7dc90f690e69f0beea9f9d8e96f8d66ae16f" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.311040 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8bfb3d97d25acef281596e76de0b7dc90f690e69f0beea9f9d8e96f8d66ae16f"} err="failed to get container status \"8bfb3d97d25acef281596e76de0b7dc90f690e69f0beea9f9d8e96f8d66ae16f\": rpc error: code = NotFound desc = could not find container \"8bfb3d97d25acef281596e76de0b7dc90f690e69f0beea9f9d8e96f8d66ae16f\": container with ID starting with 8bfb3d97d25acef281596e76de0b7dc90f690e69f0beea9f9d8e96f8d66ae16f not found: ID does not exist" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.311068 4949 scope.go:117] "RemoveContainer" containerID="600d9f66e5e657be1699c60f86c8434f3c13d8651b4750b274dcc6bfc3787aa5" Feb 16 11:31:24 crc kubenswrapper[4949]: E0216 11:31:24.311310 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"600d9f66e5e657be1699c60f86c8434f3c13d8651b4750b274dcc6bfc3787aa5\": container with ID starting with 600d9f66e5e657be1699c60f86c8434f3c13d8651b4750b274dcc6bfc3787aa5 not found: ID does not exist" containerID="600d9f66e5e657be1699c60f86c8434f3c13d8651b4750b274dcc6bfc3787aa5" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.311334 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"600d9f66e5e657be1699c60f86c8434f3c13d8651b4750b274dcc6bfc3787aa5"} err="failed to get container status \"600d9f66e5e657be1699c60f86c8434f3c13d8651b4750b274dcc6bfc3787aa5\": rpc error: code = NotFound desc = could not find container \"600d9f66e5e657be1699c60f86c8434f3c13d8651b4750b274dcc6bfc3787aa5\": container with ID starting with 600d9f66e5e657be1699c60f86c8434f3c13d8651b4750b274dcc6bfc3787aa5 not found: ID does not exist" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.311349 4949 scope.go:117] "RemoveContainer" containerID="8bfb3d97d25acef281596e76de0b7dc90f690e69f0beea9f9d8e96f8d66ae16f" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.311533 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8bfb3d97d25acef281596e76de0b7dc90f690e69f0beea9f9d8e96f8d66ae16f"} err="failed to get container status \"8bfb3d97d25acef281596e76de0b7dc90f690e69f0beea9f9d8e96f8d66ae16f\": rpc error: code = NotFound desc = could not find container \"8bfb3d97d25acef281596e76de0b7dc90f690e69f0beea9f9d8e96f8d66ae16f\": container with ID starting with 8bfb3d97d25acef281596e76de0b7dc90f690e69f0beea9f9d8e96f8d66ae16f not found: ID does not exist" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.311553 4949 scope.go:117] "RemoveContainer" containerID="600d9f66e5e657be1699c60f86c8434f3c13d8651b4750b274dcc6bfc3787aa5" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.315091 4949 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"600d9f66e5e657be1699c60f86c8434f3c13d8651b4750b274dcc6bfc3787aa5"} err="failed to get container status \"600d9f66e5e657be1699c60f86c8434f3c13d8651b4750b274dcc6bfc3787aa5\": rpc error: code = NotFound desc = could not find container \"600d9f66e5e657be1699c60f86c8434f3c13d8651b4750b274dcc6bfc3787aa5\": container with ID starting with 600d9f66e5e657be1699c60f86c8434f3c13d8651b4750b274dcc6bfc3787aa5 not found: ID does not exist" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.332628 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-74f8bcc545-j46d5"] Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.344870 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-74f8bcc545-j46d5"] Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.362700 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9962c0a5-0f9e-4564-95d0-f128685c2473-public-tls-certs\") pod \"cinder-api-0\" (UID: \"9962c0a5-0f9e-4564-95d0-f128685c2473\") " pod="openstack/cinder-api-0" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.362763 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vndm4\" (UniqueName: \"kubernetes.io/projected/9962c0a5-0f9e-4564-95d0-f128685c2473-kube-api-access-vndm4\") pod \"cinder-api-0\" (UID: \"9962c0a5-0f9e-4564-95d0-f128685c2473\") " pod="openstack/cinder-api-0" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.364115 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9962c0a5-0f9e-4564-95d0-f128685c2473-etc-machine-id\") pod \"cinder-api-0\" (UID: \"9962c0a5-0f9e-4564-95d0-f128685c2473\") " pod="openstack/cinder-api-0" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.364220 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9962c0a5-0f9e-4564-95d0-f128685c2473-scripts\") pod \"cinder-api-0\" (UID: \"9962c0a5-0f9e-4564-95d0-f128685c2473\") " pod="openstack/cinder-api-0" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.364248 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9962c0a5-0f9e-4564-95d0-f128685c2473-config-data\") pod \"cinder-api-0\" (UID: \"9962c0a5-0f9e-4564-95d0-f128685c2473\") " pod="openstack/cinder-api-0" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.364287 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9962c0a5-0f9e-4564-95d0-f128685c2473-config-data-custom\") pod \"cinder-api-0\" (UID: \"9962c0a5-0f9e-4564-95d0-f128685c2473\") " pod="openstack/cinder-api-0" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.364300 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9962c0a5-0f9e-4564-95d0-f128685c2473-etc-machine-id\") pod \"cinder-api-0\" (UID: \"9962c0a5-0f9e-4564-95d0-f128685c2473\") " pod="openstack/cinder-api-0" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.364330 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/9962c0a5-0f9e-4564-95d0-f128685c2473-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"9962c0a5-0f9e-4564-95d0-f128685c2473\") " pod="openstack/cinder-api-0" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.364401 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9962c0a5-0f9e-4564-95d0-f128685c2473-logs\") pod \"cinder-api-0\" (UID: \"9962c0a5-0f9e-4564-95d0-f128685c2473\") " pod="openstack/cinder-api-0" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.364458 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9962c0a5-0f9e-4564-95d0-f128685c2473-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"9962c0a5-0f9e-4564-95d0-f128685c2473\") " pod="openstack/cinder-api-0" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.364962 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c59b957e-c5f8-463f-8228-1051225f5140-config-data\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.365252 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9962c0a5-0f9e-4564-95d0-f128685c2473-logs\") pod \"cinder-api-0\" (UID: \"9962c0a5-0f9e-4564-95d0-f128685c2473\") " pod="openstack/cinder-api-0" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.370761 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9962c0a5-0f9e-4564-95d0-f128685c2473-config-data-custom\") pod \"cinder-api-0\" (UID: \"9962c0a5-0f9e-4564-95d0-f128685c2473\") " pod="openstack/cinder-api-0" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.371007 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9962c0a5-0f9e-4564-95d0-f128685c2473-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"9962c0a5-0f9e-4564-95d0-f128685c2473\") " pod="openstack/cinder-api-0" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.371515 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9962c0a5-0f9e-4564-95d0-f128685c2473-config-data\") pod \"cinder-api-0\" (UID: \"9962c0a5-0f9e-4564-95d0-f128685c2473\") " pod="openstack/cinder-api-0" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.376668 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9962c0a5-0f9e-4564-95d0-f128685c2473-scripts\") pod \"cinder-api-0\" (UID: \"9962c0a5-0f9e-4564-95d0-f128685c2473\") " pod="openstack/cinder-api-0" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.377038 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9962c0a5-0f9e-4564-95d0-f128685c2473-public-tls-certs\") pod \"cinder-api-0\" (UID: \"9962c0a5-0f9e-4564-95d0-f128685c2473\") " pod="openstack/cinder-api-0" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.377296 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9962c0a5-0f9e-4564-95d0-f128685c2473-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"9962c0a5-0f9e-4564-95d0-f128685c2473\") " pod="openstack/cinder-api-0" 
Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.380688 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vndm4\" (UniqueName: \"kubernetes.io/projected/9962c0a5-0f9e-4564-95d0-f128685c2473-kube-api-access-vndm4\") pod \"cinder-api-0\" (UID: \"9962c0a5-0f9e-4564-95d0-f128685c2473\") " pod="openstack/cinder-api-0" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.623679 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.973363 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c59b957e-c5f8-463f-8228-1051225f5140","Type":"ContainerDied","Data":"f49736c49e7ee0d3cf1d00ebe30072967923739f33b01ee94dcd5aecb04fe41d"} Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.973839 4949 scope.go:117] "RemoveContainer" containerID="9637ba0f42a043237105a683774ab07d6abf8daaa30e847aca569d8a8556d888" Feb 16 11:31:24 crc kubenswrapper[4949]: I0216 11:31:24.973386 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 16 11:31:25 crc kubenswrapper[4949]: I0216 11:31:25.015902 4949 scope.go:117] "RemoveContainer" containerID="e12f9ac9cc1163e7fbc665fa1788a710a8045ef5a8e189c7103ed2fe5d1a1a79" Feb 16 11:31:25 crc kubenswrapper[4949]: I0216 11:31:25.054259 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 16 11:31:25 crc kubenswrapper[4949]: I0216 11:31:25.065115 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Feb 16 11:31:25 crc kubenswrapper[4949]: I0216 11:31:25.072069 4949 scope.go:117] "RemoveContainer" containerID="8eaa87aadd7b317f610cc7d3050e7b6dc48d4248736b9228780b0c0df54af339" Feb 16 11:31:25 crc kubenswrapper[4949]: I0216 11:31:25.120241 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Feb 16 11:31:25 crc kubenswrapper[4949]: I0216 11:31:25.124715 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 16 11:31:25 crc kubenswrapper[4949]: I0216 11:31:25.124784 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-57d8c9758b-hz957" Feb 16 11:31:25 crc kubenswrapper[4949]: I0216 11:31:25.126107 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 16 11:31:25 crc kubenswrapper[4949]: I0216 11:31:25.142255 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Feb 16 11:31:25 crc kubenswrapper[4949]: I0216 11:31:25.142572 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Feb 16 11:31:25 crc kubenswrapper[4949]: I0216 11:31:25.143034 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-57d8c9758b-hz957" Feb 16 11:31:25 crc kubenswrapper[4949]: I0216 11:31:25.153272 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Feb 16 11:31:25 crc kubenswrapper[4949]: W0216 11:31:25.155438 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9962c0a5_0f9e_4564_95d0_f128685c2473.slice/crio-15034ef8c30c549994ff27f964e1c6111ce67a1ef48e4db63250f4bd538607f3 WatchSource:0}: Error finding container 15034ef8c30c549994ff27f964e1c6111ce67a1ef48e4db63250f4bd538607f3: Status 404 returned error can't find the container with id 15034ef8c30c549994ff27f964e1c6111ce67a1ef48e4db63250f4bd538607f3 Feb 16 11:31:25 crc kubenswrapper[4949]: I0216 11:31:25.259290 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9dd500a3-a7c1-4d0a-9c63-a2855b645f06" path="/var/lib/kubelet/pods/9dd500a3-a7c1-4d0a-9c63-a2855b645f06/volumes" Feb 16 11:31:25 crc kubenswrapper[4949]: I0216 11:31:25.260304 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b7171cf3-1339-4609-bf59-f9a3777bb15c" path="/var/lib/kubelet/pods/b7171cf3-1339-4609-bf59-f9a3777bb15c/volumes" Feb 16 11:31:25 crc kubenswrapper[4949]: I0216 11:31:25.260970 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c59b957e-c5f8-463f-8228-1051225f5140" path="/var/lib/kubelet/pods/c59b957e-c5f8-463f-8228-1051225f5140/volumes" Feb 16 11:31:25 crc kubenswrapper[4949]: I0216 11:31:25.285587 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f2936866-3cf4-417f-b715-a00150d249bb-run-httpd\") pod \"ceilometer-0\" (UID: \"f2936866-3cf4-417f-b715-a00150d249bb\") " pod="openstack/ceilometer-0" Feb 16 11:31:25 crc kubenswrapper[4949]: I0216 11:31:25.287042 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qcb58\" (UniqueName: \"kubernetes.io/projected/f2936866-3cf4-417f-b715-a00150d249bb-kube-api-access-qcb58\") pod \"ceilometer-0\" (UID: \"f2936866-3cf4-417f-b715-a00150d249bb\") " pod="openstack/ceilometer-0" Feb 16 11:31:25 crc kubenswrapper[4949]: I0216 11:31:25.287076 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f2936866-3cf4-417f-b715-a00150d249bb-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f2936866-3cf4-417f-b715-a00150d249bb\") " pod="openstack/ceilometer-0" Feb 16 11:31:25 crc kubenswrapper[4949]: I0216 11:31:25.287117 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f2936866-3cf4-417f-b715-a00150d249bb-log-httpd\") pod \"ceilometer-0\" (UID: \"f2936866-3cf4-417f-b715-a00150d249bb\") " pod="openstack/ceilometer-0" Feb 16 11:31:25 crc kubenswrapper[4949]: I0216 
11:31:25.287318 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f2936866-3cf4-417f-b715-a00150d249bb-scripts\") pod \"ceilometer-0\" (UID: \"f2936866-3cf4-417f-b715-a00150d249bb\") " pod="openstack/ceilometer-0" Feb 16 11:31:25 crc kubenswrapper[4949]: I0216 11:31:25.287346 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f2936866-3cf4-417f-b715-a00150d249bb-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f2936866-3cf4-417f-b715-a00150d249bb\") " pod="openstack/ceilometer-0" Feb 16 11:31:25 crc kubenswrapper[4949]: I0216 11:31:25.287394 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2936866-3cf4-417f-b715-a00150d249bb-config-data\") pod \"ceilometer-0\" (UID: \"f2936866-3cf4-417f-b715-a00150d249bb\") " pod="openstack/ceilometer-0" Feb 16 11:31:25 crc kubenswrapper[4949]: I0216 11:31:25.389782 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f2936866-3cf4-417f-b715-a00150d249bb-scripts\") pod \"ceilometer-0\" (UID: \"f2936866-3cf4-417f-b715-a00150d249bb\") " pod="openstack/ceilometer-0" Feb 16 11:31:25 crc kubenswrapper[4949]: I0216 11:31:25.389850 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f2936866-3cf4-417f-b715-a00150d249bb-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f2936866-3cf4-417f-b715-a00150d249bb\") " pod="openstack/ceilometer-0" Feb 16 11:31:25 crc kubenswrapper[4949]: I0216 11:31:25.389894 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2936866-3cf4-417f-b715-a00150d249bb-config-data\") pod \"ceilometer-0\" (UID: \"f2936866-3cf4-417f-b715-a00150d249bb\") " pod="openstack/ceilometer-0" Feb 16 11:31:25 crc kubenswrapper[4949]: I0216 11:31:25.390005 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f2936866-3cf4-417f-b715-a00150d249bb-run-httpd\") pod \"ceilometer-0\" (UID: \"f2936866-3cf4-417f-b715-a00150d249bb\") " pod="openstack/ceilometer-0" Feb 16 11:31:25 crc kubenswrapper[4949]: I0216 11:31:25.390039 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qcb58\" (UniqueName: \"kubernetes.io/projected/f2936866-3cf4-417f-b715-a00150d249bb-kube-api-access-qcb58\") pod \"ceilometer-0\" (UID: \"f2936866-3cf4-417f-b715-a00150d249bb\") " pod="openstack/ceilometer-0" Feb 16 11:31:25 crc kubenswrapper[4949]: I0216 11:31:25.390068 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f2936866-3cf4-417f-b715-a00150d249bb-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f2936866-3cf4-417f-b715-a00150d249bb\") " pod="openstack/ceilometer-0" Feb 16 11:31:25 crc kubenswrapper[4949]: I0216 11:31:25.390102 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f2936866-3cf4-417f-b715-a00150d249bb-log-httpd\") pod \"ceilometer-0\" (UID: \"f2936866-3cf4-417f-b715-a00150d249bb\") " pod="openstack/ceilometer-0" Feb 16 11:31:25 crc 
kubenswrapper[4949]: I0216 11:31:25.390884 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f2936866-3cf4-417f-b715-a00150d249bb-log-httpd\") pod \"ceilometer-0\" (UID: \"f2936866-3cf4-417f-b715-a00150d249bb\") " pod="openstack/ceilometer-0" Feb 16 11:31:25 crc kubenswrapper[4949]: I0216 11:31:25.390891 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f2936866-3cf4-417f-b715-a00150d249bb-run-httpd\") pod \"ceilometer-0\" (UID: \"f2936866-3cf4-417f-b715-a00150d249bb\") " pod="openstack/ceilometer-0" Feb 16 11:31:25 crc kubenswrapper[4949]: I0216 11:31:25.398452 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f2936866-3cf4-417f-b715-a00150d249bb-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f2936866-3cf4-417f-b715-a00150d249bb\") " pod="openstack/ceilometer-0" Feb 16 11:31:25 crc kubenswrapper[4949]: I0216 11:31:25.398951 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2936866-3cf4-417f-b715-a00150d249bb-config-data\") pod \"ceilometer-0\" (UID: \"f2936866-3cf4-417f-b715-a00150d249bb\") " pod="openstack/ceilometer-0" Feb 16 11:31:25 crc kubenswrapper[4949]: I0216 11:31:25.412023 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f2936866-3cf4-417f-b715-a00150d249bb-scripts\") pod \"ceilometer-0\" (UID: \"f2936866-3cf4-417f-b715-a00150d249bb\") " pod="openstack/ceilometer-0" Feb 16 11:31:25 crc kubenswrapper[4949]: I0216 11:31:25.412028 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f2936866-3cf4-417f-b715-a00150d249bb-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f2936866-3cf4-417f-b715-a00150d249bb\") " pod="openstack/ceilometer-0" Feb 16 11:31:25 crc kubenswrapper[4949]: I0216 11:31:25.424227 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qcb58\" (UniqueName: \"kubernetes.io/projected/f2936866-3cf4-417f-b715-a00150d249bb-kube-api-access-qcb58\") pod \"ceilometer-0\" (UID: \"f2936866-3cf4-417f-b715-a00150d249bb\") " pod="openstack/ceilometer-0" Feb 16 11:31:25 crc kubenswrapper[4949]: I0216 11:31:25.592552 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 16 11:31:25 crc kubenswrapper[4949]: I0216 11:31:25.619657 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-8fd485cb6-bhx7b"] Feb 16 11:31:25 crc kubenswrapper[4949]: I0216 11:31:25.629987 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-8fd485cb6-bhx7b" Feb 16 11:31:25 crc kubenswrapper[4949]: I0216 11:31:25.738259 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-8fd485cb6-bhx7b"] Feb 16 11:31:25 crc kubenswrapper[4949]: I0216 11:31:25.897750 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a74bfcd5-7bd3-49aa-8e77-c1e17fc431dc-scripts\") pod \"placement-8fd485cb6-bhx7b\" (UID: \"a74bfcd5-7bd3-49aa-8e77-c1e17fc431dc\") " pod="openstack/placement-8fd485cb6-bhx7b" Feb 16 11:31:25 crc kubenswrapper[4949]: I0216 11:31:25.897804 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a74bfcd5-7bd3-49aa-8e77-c1e17fc431dc-combined-ca-bundle\") pod \"placement-8fd485cb6-bhx7b\" (UID: \"a74bfcd5-7bd3-49aa-8e77-c1e17fc431dc\") " pod="openstack/placement-8fd485cb6-bhx7b" Feb 16 11:31:25 crc kubenswrapper[4949]: I0216 11:31:25.897867 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xw2lz\" (UniqueName: \"kubernetes.io/projected/a74bfcd5-7bd3-49aa-8e77-c1e17fc431dc-kube-api-access-xw2lz\") pod \"placement-8fd485cb6-bhx7b\" (UID: \"a74bfcd5-7bd3-49aa-8e77-c1e17fc431dc\") " pod="openstack/placement-8fd485cb6-bhx7b" Feb 16 11:31:25 crc kubenswrapper[4949]: I0216 11:31:25.897886 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a74bfcd5-7bd3-49aa-8e77-c1e17fc431dc-internal-tls-certs\") pod \"placement-8fd485cb6-bhx7b\" (UID: \"a74bfcd5-7bd3-49aa-8e77-c1e17fc431dc\") " pod="openstack/placement-8fd485cb6-bhx7b" Feb 16 11:31:25 crc kubenswrapper[4949]: I0216 11:31:25.897906 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a74bfcd5-7bd3-49aa-8e77-c1e17fc431dc-logs\") pod \"placement-8fd485cb6-bhx7b\" (UID: \"a74bfcd5-7bd3-49aa-8e77-c1e17fc431dc\") " pod="openstack/placement-8fd485cb6-bhx7b" Feb 16 11:31:25 crc kubenswrapper[4949]: I0216 11:31:25.898010 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a74bfcd5-7bd3-49aa-8e77-c1e17fc431dc-config-data\") pod \"placement-8fd485cb6-bhx7b\" (UID: \"a74bfcd5-7bd3-49aa-8e77-c1e17fc431dc\") " pod="openstack/placement-8fd485cb6-bhx7b" Feb 16 11:31:25 crc kubenswrapper[4949]: I0216 11:31:25.898032 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a74bfcd5-7bd3-49aa-8e77-c1e17fc431dc-public-tls-certs\") pod \"placement-8fd485cb6-bhx7b\" (UID: \"a74bfcd5-7bd3-49aa-8e77-c1e17fc431dc\") " pod="openstack/placement-8fd485cb6-bhx7b" Feb 16 11:31:26 crc kubenswrapper[4949]: I0216 11:31:26.019458 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a74bfcd5-7bd3-49aa-8e77-c1e17fc431dc-scripts\") pod \"placement-8fd485cb6-bhx7b\" (UID: \"a74bfcd5-7bd3-49aa-8e77-c1e17fc431dc\") " pod="openstack/placement-8fd485cb6-bhx7b" Feb 16 11:31:26 crc kubenswrapper[4949]: I0216 11:31:26.019674 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/a74bfcd5-7bd3-49aa-8e77-c1e17fc431dc-combined-ca-bundle\") pod \"placement-8fd485cb6-bhx7b\" (UID: \"a74bfcd5-7bd3-49aa-8e77-c1e17fc431dc\") " pod="openstack/placement-8fd485cb6-bhx7b" Feb 16 11:31:26 crc kubenswrapper[4949]: I0216 11:31:26.025212 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xw2lz\" (UniqueName: \"kubernetes.io/projected/a74bfcd5-7bd3-49aa-8e77-c1e17fc431dc-kube-api-access-xw2lz\") pod \"placement-8fd485cb6-bhx7b\" (UID: \"a74bfcd5-7bd3-49aa-8e77-c1e17fc431dc\") " pod="openstack/placement-8fd485cb6-bhx7b" Feb 16 11:31:26 crc kubenswrapper[4949]: I0216 11:31:26.025246 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a74bfcd5-7bd3-49aa-8e77-c1e17fc431dc-internal-tls-certs\") pod \"placement-8fd485cb6-bhx7b\" (UID: \"a74bfcd5-7bd3-49aa-8e77-c1e17fc431dc\") " pod="openstack/placement-8fd485cb6-bhx7b" Feb 16 11:31:26 crc kubenswrapper[4949]: I0216 11:31:26.025298 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a74bfcd5-7bd3-49aa-8e77-c1e17fc431dc-logs\") pod \"placement-8fd485cb6-bhx7b\" (UID: \"a74bfcd5-7bd3-49aa-8e77-c1e17fc431dc\") " pod="openstack/placement-8fd485cb6-bhx7b" Feb 16 11:31:26 crc kubenswrapper[4949]: I0216 11:31:26.025724 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a74bfcd5-7bd3-49aa-8e77-c1e17fc431dc-config-data\") pod \"placement-8fd485cb6-bhx7b\" (UID: \"a74bfcd5-7bd3-49aa-8e77-c1e17fc431dc\") " pod="openstack/placement-8fd485cb6-bhx7b" Feb 16 11:31:26 crc kubenswrapper[4949]: I0216 11:31:26.025755 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a74bfcd5-7bd3-49aa-8e77-c1e17fc431dc-public-tls-certs\") pod \"placement-8fd485cb6-bhx7b\" (UID: \"a74bfcd5-7bd3-49aa-8e77-c1e17fc431dc\") " pod="openstack/placement-8fd485cb6-bhx7b" Feb 16 11:31:26 crc kubenswrapper[4949]: I0216 11:31:26.029680 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a74bfcd5-7bd3-49aa-8e77-c1e17fc431dc-logs\") pod \"placement-8fd485cb6-bhx7b\" (UID: \"a74bfcd5-7bd3-49aa-8e77-c1e17fc431dc\") " pod="openstack/placement-8fd485cb6-bhx7b" Feb 16 11:31:26 crc kubenswrapper[4949]: I0216 11:31:26.034659 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a74bfcd5-7bd3-49aa-8e77-c1e17fc431dc-public-tls-certs\") pod \"placement-8fd485cb6-bhx7b\" (UID: \"a74bfcd5-7bd3-49aa-8e77-c1e17fc431dc\") " pod="openstack/placement-8fd485cb6-bhx7b" Feb 16 11:31:26 crc kubenswrapper[4949]: I0216 11:31:26.045838 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"9962c0a5-0f9e-4564-95d0-f128685c2473","Type":"ContainerStarted","Data":"15034ef8c30c549994ff27f964e1c6111ce67a1ef48e4db63250f4bd538607f3"} Feb 16 11:31:26 crc kubenswrapper[4949]: I0216 11:31:26.049068 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a74bfcd5-7bd3-49aa-8e77-c1e17fc431dc-scripts\") pod \"placement-8fd485cb6-bhx7b\" (UID: \"a74bfcd5-7bd3-49aa-8e77-c1e17fc431dc\") " pod="openstack/placement-8fd485cb6-bhx7b" Feb 16 11:31:26 crc 
kubenswrapper[4949]: I0216 11:31:26.049559 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a74bfcd5-7bd3-49aa-8e77-c1e17fc431dc-internal-tls-certs\") pod \"placement-8fd485cb6-bhx7b\" (UID: \"a74bfcd5-7bd3-49aa-8e77-c1e17fc431dc\") " pod="openstack/placement-8fd485cb6-bhx7b" Feb 16 11:31:26 crc kubenswrapper[4949]: I0216 11:31:26.050250 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a74bfcd5-7bd3-49aa-8e77-c1e17fc431dc-combined-ca-bundle\") pod \"placement-8fd485cb6-bhx7b\" (UID: \"a74bfcd5-7bd3-49aa-8e77-c1e17fc431dc\") " pod="openstack/placement-8fd485cb6-bhx7b" Feb 16 11:31:26 crc kubenswrapper[4949]: I0216 11:31:26.055993 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a74bfcd5-7bd3-49aa-8e77-c1e17fc431dc-config-data\") pod \"placement-8fd485cb6-bhx7b\" (UID: \"a74bfcd5-7bd3-49aa-8e77-c1e17fc431dc\") " pod="openstack/placement-8fd485cb6-bhx7b" Feb 16 11:31:26 crc kubenswrapper[4949]: I0216 11:31:26.056955 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xw2lz\" (UniqueName: \"kubernetes.io/projected/a74bfcd5-7bd3-49aa-8e77-c1e17fc431dc-kube-api-access-xw2lz\") pod \"placement-8fd485cb6-bhx7b\" (UID: \"a74bfcd5-7bd3-49aa-8e77-c1e17fc431dc\") " pod="openstack/placement-8fd485cb6-bhx7b" Feb 16 11:31:26 crc kubenswrapper[4949]: I0216 11:31:26.294741 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-8fd485cb6-bhx7b" Feb 16 11:31:26 crc kubenswrapper[4949]: I0216 11:31:26.343069 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 16 11:31:26 crc kubenswrapper[4949]: I0216 11:31:26.910431 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-8fd485cb6-bhx7b"] Feb 16 11:31:26 crc kubenswrapper[4949]: W0216 11:31:26.919713 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda74bfcd5_7bd3_49aa_8e77_c1e17fc431dc.slice/crio-077b676df0dd6435587b36dd622628a40ef4c0e0133d1c01656b3042457f1229 WatchSource:0}: Error finding container 077b676df0dd6435587b36dd622628a40ef4c0e0133d1c01656b3042457f1229: Status 404 returned error can't find the container with id 077b676df0dd6435587b36dd622628a40ef4c0e0133d1c01656b3042457f1229 Feb 16 11:31:27 crc kubenswrapper[4949]: I0216 11:31:27.071842 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"9962c0a5-0f9e-4564-95d0-f128685c2473","Type":"ContainerStarted","Data":"b7ada3ef2c6ee0a928388e3658079d7e7923c7c058e26144f5c2247e4fc82bdf"} Feb 16 11:31:27 crc kubenswrapper[4949]: I0216 11:31:27.075443 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-8fd485cb6-bhx7b" event={"ID":"a74bfcd5-7bd3-49aa-8e77-c1e17fc431dc","Type":"ContainerStarted","Data":"077b676df0dd6435587b36dd622628a40ef4c0e0133d1c01656b3042457f1229"} Feb 16 11:31:27 crc kubenswrapper[4949]: I0216 11:31:27.077126 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f2936866-3cf4-417f-b715-a00150d249bb","Type":"ContainerStarted","Data":"5626b7b51ad1547488e575954cb1a33e2ba2b158e11d7c0c8fc96cf2a0b07d88"} Feb 16 11:31:27 crc kubenswrapper[4949]: I0216 11:31:27.787921 4949 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Feb 16 11:31:27 crc kubenswrapper[4949]: I0216 11:31:27.900705 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5784cf869f-f2c9s" Feb 16 11:31:28 crc kubenswrapper[4949]: I0216 11:31:28.000810 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-75c8ddd69c-ttzj9"] Feb 16 11:31:28 crc kubenswrapper[4949]: I0216 11:31:28.001439 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-75c8ddd69c-ttzj9" podUID="9dc25628-882d-4ad4-a5ee-23e3b3d14abd" containerName="dnsmasq-dns" containerID="cri-o://1ae6065bda4a79260c5f6a78206b506b6d6b0860cbe831e505dfc3edf5993fa9" gracePeriod=10 Feb 16 11:31:28 crc kubenswrapper[4949]: I0216 11:31:28.106739 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"9962c0a5-0f9e-4564-95d0-f128685c2473","Type":"ContainerStarted","Data":"fa2d374f96fa0123be130cc5a31f297429c096af9756f7b6b37d8621ceb7dadc"} Feb 16 11:31:28 crc kubenswrapper[4949]: I0216 11:31:28.108211 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Feb 16 11:31:28 crc kubenswrapper[4949]: I0216 11:31:28.125864 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-8fd485cb6-bhx7b" event={"ID":"a74bfcd5-7bd3-49aa-8e77-c1e17fc431dc","Type":"ContainerStarted","Data":"f4cf59e3697520f7b24febc781bb2241c20a102e9d697216d94c2d6dce5e3392"} Feb 16 11:31:28 crc kubenswrapper[4949]: I0216 11:31:28.125918 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-8fd485cb6-bhx7b" event={"ID":"a74bfcd5-7bd3-49aa-8e77-c1e17fc431dc","Type":"ContainerStarted","Data":"7afded4832616d67c1f924d8c62f07e9c88f5e764a421e2d7824f5cbd12ab9e1"} Feb 16 11:31:28 crc kubenswrapper[4949]: I0216 11:31:28.126386 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-8fd485cb6-bhx7b" Feb 16 11:31:28 crc kubenswrapper[4949]: I0216 11:31:28.126574 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-8fd485cb6-bhx7b" Feb 16 11:31:28 crc kubenswrapper[4949]: I0216 11:31:28.129483 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f2936866-3cf4-417f-b715-a00150d249bb","Type":"ContainerStarted","Data":"a6105c78d1151101f8a7b0f374a2f29d36168a38da75c3583d71623e30a2a6f2"} Feb 16 11:31:28 crc kubenswrapper[4949]: I0216 11:31:28.181546 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=4.181526047 podStartE2EDuration="4.181526047s" podCreationTimestamp="2026-02-16 11:31:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:31:28.136303456 +0000 UTC m=+1477.765637621" watchObservedRunningTime="2026-02-16 11:31:28.181526047 +0000 UTC m=+1477.810860202" Feb 16 11:31:28 crc kubenswrapper[4949]: I0216 11:31:28.201027 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-8fd485cb6-bhx7b" podStartSLOduration=3.201000883 podStartE2EDuration="3.201000883s" podCreationTimestamp="2026-02-16 11:31:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:31:28.166110457 +0000 UTC m=+1477.795444622" 
watchObservedRunningTime="2026-02-16 11:31:28.201000883 +0000 UTC m=+1477.830335048" Feb 16 11:31:28 crc kubenswrapper[4949]: I0216 11:31:28.280871 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Feb 16 11:31:28 crc kubenswrapper[4949]: I0216 11:31:28.335207 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Feb 16 11:31:28 crc kubenswrapper[4949]: I0216 11:31:28.669037 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75c8ddd69c-ttzj9" Feb 16 11:31:28 crc kubenswrapper[4949]: I0216 11:31:28.724193 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9dc25628-882d-4ad4-a5ee-23e3b3d14abd-dns-svc\") pod \"9dc25628-882d-4ad4-a5ee-23e3b3d14abd\" (UID: \"9dc25628-882d-4ad4-a5ee-23e3b3d14abd\") " Feb 16 11:31:28 crc kubenswrapper[4949]: I0216 11:31:28.724293 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9dc25628-882d-4ad4-a5ee-23e3b3d14abd-config\") pod \"9dc25628-882d-4ad4-a5ee-23e3b3d14abd\" (UID: \"9dc25628-882d-4ad4-a5ee-23e3b3d14abd\") " Feb 16 11:31:28 crc kubenswrapper[4949]: I0216 11:31:28.724373 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-plq9j\" (UniqueName: \"kubernetes.io/projected/9dc25628-882d-4ad4-a5ee-23e3b3d14abd-kube-api-access-plq9j\") pod \"9dc25628-882d-4ad4-a5ee-23e3b3d14abd\" (UID: \"9dc25628-882d-4ad4-a5ee-23e3b3d14abd\") " Feb 16 11:31:28 crc kubenswrapper[4949]: I0216 11:31:28.724426 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9dc25628-882d-4ad4-a5ee-23e3b3d14abd-ovsdbserver-nb\") pod \"9dc25628-882d-4ad4-a5ee-23e3b3d14abd\" (UID: \"9dc25628-882d-4ad4-a5ee-23e3b3d14abd\") " Feb 16 11:31:28 crc kubenswrapper[4949]: I0216 11:31:28.724477 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9dc25628-882d-4ad4-a5ee-23e3b3d14abd-ovsdbserver-sb\") pod \"9dc25628-882d-4ad4-a5ee-23e3b3d14abd\" (UID: \"9dc25628-882d-4ad4-a5ee-23e3b3d14abd\") " Feb 16 11:31:28 crc kubenswrapper[4949]: I0216 11:31:28.724495 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9dc25628-882d-4ad4-a5ee-23e3b3d14abd-dns-swift-storage-0\") pod \"9dc25628-882d-4ad4-a5ee-23e3b3d14abd\" (UID: \"9dc25628-882d-4ad4-a5ee-23e3b3d14abd\") " Feb 16 11:31:28 crc kubenswrapper[4949]: I0216 11:31:28.757412 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9dc25628-882d-4ad4-a5ee-23e3b3d14abd-kube-api-access-plq9j" (OuterVolumeSpecName: "kube-api-access-plq9j") pod "9dc25628-882d-4ad4-a5ee-23e3b3d14abd" (UID: "9dc25628-882d-4ad4-a5ee-23e3b3d14abd"). InnerVolumeSpecName "kube-api-access-plq9j". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:31:28 crc kubenswrapper[4949]: I0216 11:31:28.799756 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9dc25628-882d-4ad4-a5ee-23e3b3d14abd-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "9dc25628-882d-4ad4-a5ee-23e3b3d14abd" (UID: "9dc25628-882d-4ad4-a5ee-23e3b3d14abd"). 
InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:31:28 crc kubenswrapper[4949]: I0216 11:31:28.829392 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-plq9j\" (UniqueName: \"kubernetes.io/projected/9dc25628-882d-4ad4-a5ee-23e3b3d14abd-kube-api-access-plq9j\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:28 crc kubenswrapper[4949]: I0216 11:31:28.829442 4949 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9dc25628-882d-4ad4-a5ee-23e3b3d14abd-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:28 crc kubenswrapper[4949]: I0216 11:31:28.948523 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9dc25628-882d-4ad4-a5ee-23e3b3d14abd-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "9dc25628-882d-4ad4-a5ee-23e3b3d14abd" (UID: "9dc25628-882d-4ad4-a5ee-23e3b3d14abd"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:31:28 crc kubenswrapper[4949]: I0216 11:31:28.950772 4949 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9dc25628-882d-4ad4-a5ee-23e3b3d14abd-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:28 crc kubenswrapper[4949]: I0216 11:31:28.980100 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9dc25628-882d-4ad4-a5ee-23e3b3d14abd-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "9dc25628-882d-4ad4-a5ee-23e3b3d14abd" (UID: "9dc25628-882d-4ad4-a5ee-23e3b3d14abd"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:31:29 crc kubenswrapper[4949]: I0216 11:31:29.008678 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9dc25628-882d-4ad4-a5ee-23e3b3d14abd-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "9dc25628-882d-4ad4-a5ee-23e3b3d14abd" (UID: "9dc25628-882d-4ad4-a5ee-23e3b3d14abd"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:31:29 crc kubenswrapper[4949]: I0216 11:31:29.055440 4949 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9dc25628-882d-4ad4-a5ee-23e3b3d14abd-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:29 crc kubenswrapper[4949]: I0216 11:31:29.055509 4949 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9dc25628-882d-4ad4-a5ee-23e3b3d14abd-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:29 crc kubenswrapper[4949]: I0216 11:31:29.066748 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9dc25628-882d-4ad4-a5ee-23e3b3d14abd-config" (OuterVolumeSpecName: "config") pod "9dc25628-882d-4ad4-a5ee-23e3b3d14abd" (UID: "9dc25628-882d-4ad4-a5ee-23e3b3d14abd"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:31:29 crc kubenswrapper[4949]: I0216 11:31:29.145603 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f2936866-3cf4-417f-b715-a00150d249bb","Type":"ContainerStarted","Data":"df5508fcf10f574d56c7adc75b769e915803c29a12b5da68770ad1bf706f9c23"} Feb 16 11:31:29 crc kubenswrapper[4949]: I0216 11:31:29.148767 4949 generic.go:334] "Generic (PLEG): container finished" podID="9dc25628-882d-4ad4-a5ee-23e3b3d14abd" containerID="1ae6065bda4a79260c5f6a78206b506b6d6b0860cbe831e505dfc3edf5993fa9" exitCode=0 Feb 16 11:31:29 crc kubenswrapper[4949]: I0216 11:31:29.149023 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75c8ddd69c-ttzj9" event={"ID":"9dc25628-882d-4ad4-a5ee-23e3b3d14abd","Type":"ContainerDied","Data":"1ae6065bda4a79260c5f6a78206b506b6d6b0860cbe831e505dfc3edf5993fa9"} Feb 16 11:31:29 crc kubenswrapper[4949]: I0216 11:31:29.149142 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75c8ddd69c-ttzj9" event={"ID":"9dc25628-882d-4ad4-a5ee-23e3b3d14abd","Type":"ContainerDied","Data":"e77841fe9f11feb562a3471a916ce5267dbaaebf6a541c77361575093761df75"} Feb 16 11:31:29 crc kubenswrapper[4949]: I0216 11:31:29.149052 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75c8ddd69c-ttzj9" Feb 16 11:31:29 crc kubenswrapper[4949]: I0216 11:31:29.149209 4949 scope.go:117] "RemoveContainer" containerID="1ae6065bda4a79260c5f6a78206b506b6d6b0860cbe831e505dfc3edf5993fa9" Feb 16 11:31:29 crc kubenswrapper[4949]: I0216 11:31:29.149507 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="de507279-e660-4141-becd-e2e55408a30d" containerName="cinder-scheduler" containerID="cri-o://756fc3585e568ab563e651983eafc8e5cbe8aa0de6a5256452fba4485c28ffba" gracePeriod=30 Feb 16 11:31:29 crc kubenswrapper[4949]: I0216 11:31:29.149607 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="de507279-e660-4141-becd-e2e55408a30d" containerName="probe" containerID="cri-o://bfd65f9609247151a0eb01af3eeed9d10a4452921548eb812d13f14db07b16bf" gracePeriod=30 Feb 16 11:31:29 crc kubenswrapper[4949]: I0216 11:31:29.159870 4949 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9dc25628-882d-4ad4-a5ee-23e3b3d14abd-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:29 crc kubenswrapper[4949]: I0216 11:31:29.195618 4949 scope.go:117] "RemoveContainer" containerID="71fe167f5801ab4214d333971415f3732f46ee074d859509535998c65aef0e39" Feb 16 11:31:29 crc kubenswrapper[4949]: I0216 11:31:29.251743 4949 scope.go:117] "RemoveContainer" containerID="1ae6065bda4a79260c5f6a78206b506b6d6b0860cbe831e505dfc3edf5993fa9" Feb 16 11:31:29 crc kubenswrapper[4949]: E0216 11:31:29.255727 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1ae6065bda4a79260c5f6a78206b506b6d6b0860cbe831e505dfc3edf5993fa9\": container with ID starting with 1ae6065bda4a79260c5f6a78206b506b6d6b0860cbe831e505dfc3edf5993fa9 not found: ID does not exist" containerID="1ae6065bda4a79260c5f6a78206b506b6d6b0860cbe831e505dfc3edf5993fa9" Feb 16 11:31:29 crc kubenswrapper[4949]: I0216 11:31:29.255797 4949 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"1ae6065bda4a79260c5f6a78206b506b6d6b0860cbe831e505dfc3edf5993fa9"} err="failed to get container status \"1ae6065bda4a79260c5f6a78206b506b6d6b0860cbe831e505dfc3edf5993fa9\": rpc error: code = NotFound desc = could not find container \"1ae6065bda4a79260c5f6a78206b506b6d6b0860cbe831e505dfc3edf5993fa9\": container with ID starting with 1ae6065bda4a79260c5f6a78206b506b6d6b0860cbe831e505dfc3edf5993fa9 not found: ID does not exist" Feb 16 11:31:29 crc kubenswrapper[4949]: I0216 11:31:29.255835 4949 scope.go:117] "RemoveContainer" containerID="71fe167f5801ab4214d333971415f3732f46ee074d859509535998c65aef0e39" Feb 16 11:31:29 crc kubenswrapper[4949]: I0216 11:31:29.259640 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-75c8ddd69c-ttzj9"] Feb 16 11:31:29 crc kubenswrapper[4949]: E0216 11:31:29.260818 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"71fe167f5801ab4214d333971415f3732f46ee074d859509535998c65aef0e39\": container with ID starting with 71fe167f5801ab4214d333971415f3732f46ee074d859509535998c65aef0e39 not found: ID does not exist" containerID="71fe167f5801ab4214d333971415f3732f46ee074d859509535998c65aef0e39" Feb 16 11:31:29 crc kubenswrapper[4949]: I0216 11:31:29.260862 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"71fe167f5801ab4214d333971415f3732f46ee074d859509535998c65aef0e39"} err="failed to get container status \"71fe167f5801ab4214d333971415f3732f46ee074d859509535998c65aef0e39\": rpc error: code = NotFound desc = could not find container \"71fe167f5801ab4214d333971415f3732f46ee074d859509535998c65aef0e39\": container with ID starting with 71fe167f5801ab4214d333971415f3732f46ee074d859509535998c65aef0e39 not found: ID does not exist" Feb 16 11:31:29 crc kubenswrapper[4949]: I0216 11:31:29.270650 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-75c8ddd69c-ttzj9"] Feb 16 11:31:30 crc kubenswrapper[4949]: I0216 11:31:30.162357 4949 generic.go:334] "Generic (PLEG): container finished" podID="de507279-e660-4141-becd-e2e55408a30d" containerID="bfd65f9609247151a0eb01af3eeed9d10a4452921548eb812d13f14db07b16bf" exitCode=0 Feb 16 11:31:30 crc kubenswrapper[4949]: I0216 11:31:30.162446 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"de507279-e660-4141-becd-e2e55408a30d","Type":"ContainerDied","Data":"bfd65f9609247151a0eb01af3eeed9d10a4452921548eb812d13f14db07b16bf"} Feb 16 11:31:30 crc kubenswrapper[4949]: I0216 11:31:30.169544 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f2936866-3cf4-417f-b715-a00150d249bb","Type":"ContainerStarted","Data":"4704d9592b6ee32852b6b3565e05968432225b00681da1dcde794b62c521429a"} Feb 16 11:31:31 crc kubenswrapper[4949]: I0216 11:31:31.186878 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f2936866-3cf4-417f-b715-a00150d249bb","Type":"ContainerStarted","Data":"5c6dbf926bde95dd6e2f3bc5174fc727cf9849d5b0df9d64eea230113701c611"} Feb 16 11:31:31 crc kubenswrapper[4949]: I0216 11:31:31.187548 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Feb 16 11:31:31 crc kubenswrapper[4949]: I0216 11:31:31.219863 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.778313187 
podStartE2EDuration="6.219838747s" podCreationTimestamp="2026-02-16 11:31:25 +0000 UTC" firstStartedPulling="2026-02-16 11:31:26.344490635 +0000 UTC m=+1475.973824800" lastFinishedPulling="2026-02-16 11:31:30.786016195 +0000 UTC m=+1480.415350360" observedRunningTime="2026-02-16 11:31:31.206784034 +0000 UTC m=+1480.836118199" watchObservedRunningTime="2026-02-16 11:31:31.219838747 +0000 UTC m=+1480.849172912" Feb 16 11:31:31 crc kubenswrapper[4949]: I0216 11:31:31.251103 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9dc25628-882d-4ad4-a5ee-23e3b3d14abd" path="/var/lib/kubelet/pods/9dc25628-882d-4ad4-a5ee-23e3b3d14abd/volumes" Feb 16 11:31:31 crc kubenswrapper[4949]: I0216 11:31:31.443957 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-59b8c4c946-z72ns" Feb 16 11:31:31 crc kubenswrapper[4949]: I0216 11:31:31.943318 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.056591 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/de507279-e660-4141-becd-e2e55408a30d-config-data-custom\") pod \"de507279-e660-4141-becd-e2e55408a30d\" (UID: \"de507279-e660-4141-becd-e2e55408a30d\") " Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.056745 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/de507279-e660-4141-becd-e2e55408a30d-scripts\") pod \"de507279-e660-4141-becd-e2e55408a30d\" (UID: \"de507279-e660-4141-becd-e2e55408a30d\") " Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.056873 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hlmww\" (UniqueName: \"kubernetes.io/projected/de507279-e660-4141-becd-e2e55408a30d-kube-api-access-hlmww\") pod \"de507279-e660-4141-becd-e2e55408a30d\" (UID: \"de507279-e660-4141-becd-e2e55408a30d\") " Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.056922 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de507279-e660-4141-becd-e2e55408a30d-config-data\") pod \"de507279-e660-4141-becd-e2e55408a30d\" (UID: \"de507279-e660-4141-becd-e2e55408a30d\") " Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.057061 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de507279-e660-4141-becd-e2e55408a30d-combined-ca-bundle\") pod \"de507279-e660-4141-becd-e2e55408a30d\" (UID: \"de507279-e660-4141-becd-e2e55408a30d\") " Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.057108 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/de507279-e660-4141-becd-e2e55408a30d-etc-machine-id\") pod \"de507279-e660-4141-becd-e2e55408a30d\" (UID: \"de507279-e660-4141-becd-e2e55408a30d\") " Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.057876 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/de507279-e660-4141-becd-e2e55408a30d-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "de507279-e660-4141-becd-e2e55408a30d" (UID: "de507279-e660-4141-becd-e2e55408a30d"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.077378 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de507279-e660-4141-becd-e2e55408a30d-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "de507279-e660-4141-becd-e2e55408a30d" (UID: "de507279-e660-4141-becd-e2e55408a30d"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.077485 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de507279-e660-4141-becd-e2e55408a30d-scripts" (OuterVolumeSpecName: "scripts") pod "de507279-e660-4141-becd-e2e55408a30d" (UID: "de507279-e660-4141-becd-e2e55408a30d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.082551 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de507279-e660-4141-becd-e2e55408a30d-kube-api-access-hlmww" (OuterVolumeSpecName: "kube-api-access-hlmww") pod "de507279-e660-4141-becd-e2e55408a30d" (UID: "de507279-e660-4141-becd-e2e55408a30d"). InnerVolumeSpecName "kube-api-access-hlmww". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.146983 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de507279-e660-4141-becd-e2e55408a30d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "de507279-e660-4141-becd-e2e55408a30d" (UID: "de507279-e660-4141-becd-e2e55408a30d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.160232 4949 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/de507279-e660-4141-becd-e2e55408a30d-config-data-custom\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.160258 4949 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/de507279-e660-4141-becd-e2e55408a30d-scripts\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.160267 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hlmww\" (UniqueName: \"kubernetes.io/projected/de507279-e660-4141-becd-e2e55408a30d-kube-api-access-hlmww\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.160276 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de507279-e660-4141-becd-e2e55408a30d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.160284 4949 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/de507279-e660-4141-becd-e2e55408a30d-etc-machine-id\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.200263 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de507279-e660-4141-becd-e2e55408a30d-config-data" (OuterVolumeSpecName: "config-data") pod "de507279-e660-4141-becd-e2e55408a30d" (UID: "de507279-e660-4141-becd-e2e55408a30d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.202019 4949 generic.go:334] "Generic (PLEG): container finished" podID="de507279-e660-4141-becd-e2e55408a30d" containerID="756fc3585e568ab563e651983eafc8e5cbe8aa0de6a5256452fba4485c28ffba" exitCode=0 Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.202393 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"de507279-e660-4141-becd-e2e55408a30d","Type":"ContainerDied","Data":"756fc3585e568ab563e651983eafc8e5cbe8aa0de6a5256452fba4485c28ffba"} Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.202474 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"de507279-e660-4141-becd-e2e55408a30d","Type":"ContainerDied","Data":"a0beba34065de8d9bcb9d2f59cfe695e1b08fadc1cab388d3f938c9f901330ed"} Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.202484 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.202502 4949 scope.go:117] "RemoveContainer" containerID="bfd65f9609247151a0eb01af3eeed9d10a4452921548eb812d13f14db07b16bf" Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.262259 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de507279-e660-4141-becd-e2e55408a30d-config-data\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.288601 4949 scope.go:117] "RemoveContainer" containerID="756fc3585e568ab563e651983eafc8e5cbe8aa0de6a5256452fba4485c28ffba" Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.311258 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.329121 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.346703 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Feb 16 11:31:32 crc kubenswrapper[4949]: E0216 11:31:32.347266 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9dc25628-882d-4ad4-a5ee-23e3b3d14abd" containerName="init" Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.347286 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="9dc25628-882d-4ad4-a5ee-23e3b3d14abd" containerName="init" Feb 16 11:31:32 crc kubenswrapper[4949]: E0216 11:31:32.347301 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de507279-e660-4141-becd-e2e55408a30d" containerName="cinder-scheduler" Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.347310 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="de507279-e660-4141-becd-e2e55408a30d" containerName="cinder-scheduler" Feb 16 11:31:32 crc kubenswrapper[4949]: E0216 11:31:32.347324 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9dc25628-882d-4ad4-a5ee-23e3b3d14abd" containerName="dnsmasq-dns" Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.347330 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="9dc25628-882d-4ad4-a5ee-23e3b3d14abd" containerName="dnsmasq-dns" Feb 16 11:31:32 crc kubenswrapper[4949]: E0216 11:31:32.347348 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de507279-e660-4141-becd-e2e55408a30d" containerName="probe" Feb 16 11:31:32 crc 
kubenswrapper[4949]: I0216 11:31:32.347354 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="de507279-e660-4141-becd-e2e55408a30d" containerName="probe" Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.347780 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="9dc25628-882d-4ad4-a5ee-23e3b3d14abd" containerName="dnsmasq-dns" Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.347813 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="de507279-e660-4141-becd-e2e55408a30d" containerName="probe" Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.347829 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="de507279-e660-4141-becd-e2e55408a30d" containerName="cinder-scheduler" Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.349117 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.352698 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.362802 4949 scope.go:117] "RemoveContainer" containerID="bfd65f9609247151a0eb01af3eeed9d10a4452921548eb812d13f14db07b16bf" Feb 16 11:31:32 crc kubenswrapper[4949]: E0216 11:31:32.375768 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bfd65f9609247151a0eb01af3eeed9d10a4452921548eb812d13f14db07b16bf\": container with ID starting with bfd65f9609247151a0eb01af3eeed9d10a4452921548eb812d13f14db07b16bf not found: ID does not exist" containerID="bfd65f9609247151a0eb01af3eeed9d10a4452921548eb812d13f14db07b16bf" Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.375872 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bfd65f9609247151a0eb01af3eeed9d10a4452921548eb812d13f14db07b16bf"} err="failed to get container status \"bfd65f9609247151a0eb01af3eeed9d10a4452921548eb812d13f14db07b16bf\": rpc error: code = NotFound desc = could not find container \"bfd65f9609247151a0eb01af3eeed9d10a4452921548eb812d13f14db07b16bf\": container with ID starting with bfd65f9609247151a0eb01af3eeed9d10a4452921548eb812d13f14db07b16bf not found: ID does not exist" Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.375913 4949 scope.go:117] "RemoveContainer" containerID="756fc3585e568ab563e651983eafc8e5cbe8aa0de6a5256452fba4485c28ffba" Feb 16 11:31:32 crc kubenswrapper[4949]: E0216 11:31:32.382634 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"756fc3585e568ab563e651983eafc8e5cbe8aa0de6a5256452fba4485c28ffba\": container with ID starting with 756fc3585e568ab563e651983eafc8e5cbe8aa0de6a5256452fba4485c28ffba not found: ID does not exist" containerID="756fc3585e568ab563e651983eafc8e5cbe8aa0de6a5256452fba4485c28ffba" Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.382697 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"756fc3585e568ab563e651983eafc8e5cbe8aa0de6a5256452fba4485c28ffba"} err="failed to get container status \"756fc3585e568ab563e651983eafc8e5cbe8aa0de6a5256452fba4485c28ffba\": rpc error: code = NotFound desc = could not find container \"756fc3585e568ab563e651983eafc8e5cbe8aa0de6a5256452fba4485c28ffba\": container with ID starting with 756fc3585e568ab563e651983eafc8e5cbe8aa0de6a5256452fba4485c28ffba not 
found: ID does not exist" Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.410413 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.470305 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5842411c-0b82-455a-a060-0546d00907be-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"5842411c-0b82-455a-a060-0546d00907be\") " pod="openstack/cinder-scheduler-0" Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.470742 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5842411c-0b82-455a-a060-0546d00907be-scripts\") pod \"cinder-scheduler-0\" (UID: \"5842411c-0b82-455a-a060-0546d00907be\") " pod="openstack/cinder-scheduler-0" Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.470860 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5842411c-0b82-455a-a060-0546d00907be-config-data\") pod \"cinder-scheduler-0\" (UID: \"5842411c-0b82-455a-a060-0546d00907be\") " pod="openstack/cinder-scheduler-0" Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.471051 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5842411c-0b82-455a-a060-0546d00907be-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"5842411c-0b82-455a-a060-0546d00907be\") " pod="openstack/cinder-scheduler-0" Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.472357 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-prm4t\" (UniqueName: \"kubernetes.io/projected/5842411c-0b82-455a-a060-0546d00907be-kube-api-access-prm4t\") pod \"cinder-scheduler-0\" (UID: \"5842411c-0b82-455a-a060-0546d00907be\") " pod="openstack/cinder-scheduler-0" Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.472563 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5842411c-0b82-455a-a060-0546d00907be-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"5842411c-0b82-455a-a060-0546d00907be\") " pod="openstack/cinder-scheduler-0" Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.575183 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5842411c-0b82-455a-a060-0546d00907be-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"5842411c-0b82-455a-a060-0546d00907be\") " pod="openstack/cinder-scheduler-0" Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.575252 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5842411c-0b82-455a-a060-0546d00907be-scripts\") pod \"cinder-scheduler-0\" (UID: \"5842411c-0b82-455a-a060-0546d00907be\") " pod="openstack/cinder-scheduler-0" Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.575314 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5842411c-0b82-455a-a060-0546d00907be-config-data\") pod \"cinder-scheduler-0\" (UID: \"5842411c-0b82-455a-a060-0546d00907be\") " 
pod="openstack/cinder-scheduler-0" Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.575366 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5842411c-0b82-455a-a060-0546d00907be-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"5842411c-0b82-455a-a060-0546d00907be\") " pod="openstack/cinder-scheduler-0" Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.575408 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5842411c-0b82-455a-a060-0546d00907be-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"5842411c-0b82-455a-a060-0546d00907be\") " pod="openstack/cinder-scheduler-0" Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.575582 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-prm4t\" (UniqueName: \"kubernetes.io/projected/5842411c-0b82-455a-a060-0546d00907be-kube-api-access-prm4t\") pod \"cinder-scheduler-0\" (UID: \"5842411c-0b82-455a-a060-0546d00907be\") " pod="openstack/cinder-scheduler-0" Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.575643 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5842411c-0b82-455a-a060-0546d00907be-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"5842411c-0b82-455a-a060-0546d00907be\") " pod="openstack/cinder-scheduler-0" Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.579781 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5842411c-0b82-455a-a060-0546d00907be-config-data\") pod \"cinder-scheduler-0\" (UID: \"5842411c-0b82-455a-a060-0546d00907be\") " pod="openstack/cinder-scheduler-0" Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.580888 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5842411c-0b82-455a-a060-0546d00907be-scripts\") pod \"cinder-scheduler-0\" (UID: \"5842411c-0b82-455a-a060-0546d00907be\") " pod="openstack/cinder-scheduler-0" Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.581158 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5842411c-0b82-455a-a060-0546d00907be-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"5842411c-0b82-455a-a060-0546d00907be\") " pod="openstack/cinder-scheduler-0" Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.581475 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5842411c-0b82-455a-a060-0546d00907be-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"5842411c-0b82-455a-a060-0546d00907be\") " pod="openstack/cinder-scheduler-0" Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.594722 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-prm4t\" (UniqueName: \"kubernetes.io/projected/5842411c-0b82-455a-a060-0546d00907be-kube-api-access-prm4t\") pod \"cinder-scheduler-0\" (UID: \"5842411c-0b82-455a-a060-0546d00907be\") " pod="openstack/cinder-scheduler-0" Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.684228 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Feb 16 11:31:32 crc kubenswrapper[4949]: I0216 11:31:32.918148 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-g76dr" podUID="acc263a1-4f57-4dca-bcc5-5d5388539a5d" containerName="registry-server" probeResult="failure" output=< Feb 16 11:31:32 crc kubenswrapper[4949]: timeout: failed to connect service ":50051" within 1s Feb 16 11:31:32 crc kubenswrapper[4949]: > Feb 16 11:31:33 crc kubenswrapper[4949]: I0216 11:31:33.198354 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Feb 16 11:31:33 crc kubenswrapper[4949]: I0216 11:31:33.217733 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"5842411c-0b82-455a-a060-0546d00907be","Type":"ContainerStarted","Data":"69917e2116b75dc787da090ccf1e1b8a164b6141ff380005ed75369dbbdc6dd1"} Feb 16 11:31:33 crc kubenswrapper[4949]: I0216 11:31:33.255783 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="de507279-e660-4141-becd-e2e55408a30d" path="/var/lib/kubelet/pods/de507279-e660-4141-becd-e2e55408a30d/volumes" Feb 16 11:31:34 crc kubenswrapper[4949]: I0216 11:31:34.243357 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"5842411c-0b82-455a-a060-0546d00907be","Type":"ContainerStarted","Data":"807b56e3701dc9b170cfdde9a6e3e7b859ebbf59c36fbb7e22765fd0b56831ae"} Feb 16 11:31:35 crc kubenswrapper[4949]: I0216 11:31:35.258337 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"5842411c-0b82-455a-a060-0546d00907be","Type":"ContainerStarted","Data":"df08ddbb851e8081f370e7efdbb44f1e40fffb974396e7e28f7982db42451791"} Feb 16 11:31:35 crc kubenswrapper[4949]: I0216 11:31:35.290817 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.290794739 podStartE2EDuration="3.290794739s" podCreationTimestamp="2026-02-16 11:31:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:31:35.276391728 +0000 UTC m=+1484.905725923" watchObservedRunningTime="2026-02-16 11:31:35.290794739 +0000 UTC m=+1484.920128894" Feb 16 11:31:35 crc kubenswrapper[4949]: I0216 11:31:35.452131 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Feb 16 11:31:35 crc kubenswrapper[4949]: I0216 11:31:35.453903 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Feb 16 11:31:35 crc kubenswrapper[4949]: I0216 11:31:35.464784 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Feb 16 11:31:35 crc kubenswrapper[4949]: I0216 11:31:35.480755 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Feb 16 11:31:35 crc kubenswrapper[4949]: I0216 11:31:35.480890 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-w4dfw" Feb 16 11:31:35 crc kubenswrapper[4949]: I0216 11:31:35.481094 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Feb 16 11:31:35 crc kubenswrapper[4949]: I0216 11:31:35.555648 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wzxxf\" (UniqueName: \"kubernetes.io/projected/fea9f5ef-1f12-446a-ab82-50631b44a37f-kube-api-access-wzxxf\") pod \"openstackclient\" (UID: \"fea9f5ef-1f12-446a-ab82-50631b44a37f\") " pod="openstack/openstackclient" Feb 16 11:31:35 crc kubenswrapper[4949]: I0216 11:31:35.555786 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fea9f5ef-1f12-446a-ab82-50631b44a37f-combined-ca-bundle\") pod \"openstackclient\" (UID: \"fea9f5ef-1f12-446a-ab82-50631b44a37f\") " pod="openstack/openstackclient" Feb 16 11:31:35 crc kubenswrapper[4949]: I0216 11:31:35.555945 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/fea9f5ef-1f12-446a-ab82-50631b44a37f-openstack-config\") pod \"openstackclient\" (UID: \"fea9f5ef-1f12-446a-ab82-50631b44a37f\") " pod="openstack/openstackclient" Feb 16 11:31:35 crc kubenswrapper[4949]: I0216 11:31:35.555994 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/fea9f5ef-1f12-446a-ab82-50631b44a37f-openstack-config-secret\") pod \"openstackclient\" (UID: \"fea9f5ef-1f12-446a-ab82-50631b44a37f\") " pod="openstack/openstackclient" Feb 16 11:31:35 crc kubenswrapper[4949]: I0216 11:31:35.658476 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/fea9f5ef-1f12-446a-ab82-50631b44a37f-openstack-config\") pod \"openstackclient\" (UID: \"fea9f5ef-1f12-446a-ab82-50631b44a37f\") " pod="openstack/openstackclient" Feb 16 11:31:35 crc kubenswrapper[4949]: I0216 11:31:35.658545 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/fea9f5ef-1f12-446a-ab82-50631b44a37f-openstack-config-secret\") pod \"openstackclient\" (UID: \"fea9f5ef-1f12-446a-ab82-50631b44a37f\") " pod="openstack/openstackclient" Feb 16 11:31:35 crc kubenswrapper[4949]: I0216 11:31:35.658580 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wzxxf\" (UniqueName: \"kubernetes.io/projected/fea9f5ef-1f12-446a-ab82-50631b44a37f-kube-api-access-wzxxf\") pod \"openstackclient\" (UID: \"fea9f5ef-1f12-446a-ab82-50631b44a37f\") " pod="openstack/openstackclient" Feb 16 11:31:35 crc kubenswrapper[4949]: I0216 11:31:35.658655 4949 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fea9f5ef-1f12-446a-ab82-50631b44a37f-combined-ca-bundle\") pod \"openstackclient\" (UID: \"fea9f5ef-1f12-446a-ab82-50631b44a37f\") " pod="openstack/openstackclient" Feb 16 11:31:35 crc kubenswrapper[4949]: I0216 11:31:35.660225 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/fea9f5ef-1f12-446a-ab82-50631b44a37f-openstack-config\") pod \"openstackclient\" (UID: \"fea9f5ef-1f12-446a-ab82-50631b44a37f\") " pod="openstack/openstackclient" Feb 16 11:31:35 crc kubenswrapper[4949]: I0216 11:31:35.675633 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fea9f5ef-1f12-446a-ab82-50631b44a37f-combined-ca-bundle\") pod \"openstackclient\" (UID: \"fea9f5ef-1f12-446a-ab82-50631b44a37f\") " pod="openstack/openstackclient" Feb 16 11:31:35 crc kubenswrapper[4949]: I0216 11:31:35.681019 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/fea9f5ef-1f12-446a-ab82-50631b44a37f-openstack-config-secret\") pod \"openstackclient\" (UID: \"fea9f5ef-1f12-446a-ab82-50631b44a37f\") " pod="openstack/openstackclient" Feb 16 11:31:35 crc kubenswrapper[4949]: I0216 11:31:35.683302 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wzxxf\" (UniqueName: \"kubernetes.io/projected/fea9f5ef-1f12-446a-ab82-50631b44a37f-kube-api-access-wzxxf\") pod \"openstackclient\" (UID: \"fea9f5ef-1f12-446a-ab82-50631b44a37f\") " pod="openstack/openstackclient" Feb 16 11:31:35 crc kubenswrapper[4949]: I0216 11:31:35.808844 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Feb 16 11:31:36 crc kubenswrapper[4949]: I0216 11:31:36.604731 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Feb 16 11:31:37 crc kubenswrapper[4949]: I0216 11:31:37.300454 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"fea9f5ef-1f12-446a-ab82-50631b44a37f","Type":"ContainerStarted","Data":"f77174ae8a03e5a1b5b2ac8b0d2f55bfe540afd65e89c04baacf7e9b4ded414c"} Feb 16 11:31:37 crc kubenswrapper[4949]: I0216 11:31:37.509432 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Feb 16 11:31:37 crc kubenswrapper[4949]: I0216 11:31:37.684364 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Feb 16 11:31:38 crc kubenswrapper[4949]: I0216 11:31:38.559163 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 16 11:31:38 crc kubenswrapper[4949]: I0216 11:31:38.559487 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f2936866-3cf4-417f-b715-a00150d249bb" containerName="ceilometer-central-agent" containerID="cri-o://a6105c78d1151101f8a7b0f374a2f29d36168a38da75c3583d71623e30a2a6f2" gracePeriod=30 Feb 16 11:31:38 crc kubenswrapper[4949]: I0216 11:31:38.560106 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f2936866-3cf4-417f-b715-a00150d249bb" containerName="proxy-httpd" containerID="cri-o://5c6dbf926bde95dd6e2f3bc5174fc727cf9849d5b0df9d64eea230113701c611" gracePeriod=30 Feb 16 11:31:38 crc kubenswrapper[4949]: I0216 11:31:38.560165 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f2936866-3cf4-417f-b715-a00150d249bb" containerName="sg-core" containerID="cri-o://4704d9592b6ee32852b6b3565e05968432225b00681da1dcde794b62c521429a" gracePeriod=30 Feb 16 11:31:38 crc kubenswrapper[4949]: I0216 11:31:38.560285 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f2936866-3cf4-417f-b715-a00150d249bb" containerName="ceilometer-notification-agent" containerID="cri-o://df5508fcf10f574d56c7adc75b769e915803c29a12b5da68770ad1bf706f9c23" gracePeriod=30 Feb 16 11:31:38 crc kubenswrapper[4949]: I0216 11:31:38.754521 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-65dcd67cf9-prcpn"] Feb 16 11:31:38 crc kubenswrapper[4949]: I0216 11:31:38.757319 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-65dcd67cf9-prcpn" Feb 16 11:31:38 crc kubenswrapper[4949]: I0216 11:31:38.759275 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Feb 16 11:31:38 crc kubenswrapper[4949]: I0216 11:31:38.763195 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Feb 16 11:31:38 crc kubenswrapper[4949]: I0216 11:31:38.763534 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Feb 16 11:31:38 crc kubenswrapper[4949]: I0216 11:31:38.782050 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-65dcd67cf9-prcpn"] Feb 16 11:31:39 crc kubenswrapper[4949]: I0216 11:31:39.194028 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f27f5322-b5fc-4c02-ac21-6cf820ce08ce-combined-ca-bundle\") pod \"swift-proxy-65dcd67cf9-prcpn\" (UID: \"f27f5322-b5fc-4c02-ac21-6cf820ce08ce\") " pod="openstack/swift-proxy-65dcd67cf9-prcpn" Feb 16 11:31:39 crc kubenswrapper[4949]: I0216 11:31:39.194120 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f27f5322-b5fc-4c02-ac21-6cf820ce08ce-public-tls-certs\") pod \"swift-proxy-65dcd67cf9-prcpn\" (UID: \"f27f5322-b5fc-4c02-ac21-6cf820ce08ce\") " pod="openstack/swift-proxy-65dcd67cf9-prcpn" Feb 16 11:31:39 crc kubenswrapper[4949]: I0216 11:31:39.194160 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f27f5322-b5fc-4c02-ac21-6cf820ce08ce-log-httpd\") pod \"swift-proxy-65dcd67cf9-prcpn\" (UID: \"f27f5322-b5fc-4c02-ac21-6cf820ce08ce\") " pod="openstack/swift-proxy-65dcd67cf9-prcpn" Feb 16 11:31:39 crc kubenswrapper[4949]: I0216 11:31:39.194217 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f27f5322-b5fc-4c02-ac21-6cf820ce08ce-config-data\") pod \"swift-proxy-65dcd67cf9-prcpn\" (UID: \"f27f5322-b5fc-4c02-ac21-6cf820ce08ce\") " pod="openstack/swift-proxy-65dcd67cf9-prcpn" Feb 16 11:31:39 crc kubenswrapper[4949]: I0216 11:31:39.194250 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/f27f5322-b5fc-4c02-ac21-6cf820ce08ce-etc-swift\") pod \"swift-proxy-65dcd67cf9-prcpn\" (UID: \"f27f5322-b5fc-4c02-ac21-6cf820ce08ce\") " pod="openstack/swift-proxy-65dcd67cf9-prcpn" Feb 16 11:31:39 crc kubenswrapper[4949]: I0216 11:31:39.194580 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f27f5322-b5fc-4c02-ac21-6cf820ce08ce-run-httpd\") pod \"swift-proxy-65dcd67cf9-prcpn\" (UID: \"f27f5322-b5fc-4c02-ac21-6cf820ce08ce\") " pod="openstack/swift-proxy-65dcd67cf9-prcpn" Feb 16 11:31:39 crc kubenswrapper[4949]: I0216 11:31:39.194837 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f27f5322-b5fc-4c02-ac21-6cf820ce08ce-internal-tls-certs\") pod \"swift-proxy-65dcd67cf9-prcpn\" (UID: \"f27f5322-b5fc-4c02-ac21-6cf820ce08ce\") " 
pod="openstack/swift-proxy-65dcd67cf9-prcpn" Feb 16 11:31:39 crc kubenswrapper[4949]: I0216 11:31:39.194928 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ksd64\" (UniqueName: \"kubernetes.io/projected/f27f5322-b5fc-4c02-ac21-6cf820ce08ce-kube-api-access-ksd64\") pod \"swift-proxy-65dcd67cf9-prcpn\" (UID: \"f27f5322-b5fc-4c02-ac21-6cf820ce08ce\") " pod="openstack/swift-proxy-65dcd67cf9-prcpn" Feb 16 11:31:39 crc kubenswrapper[4949]: I0216 11:31:39.302953 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f27f5322-b5fc-4c02-ac21-6cf820ce08ce-combined-ca-bundle\") pod \"swift-proxy-65dcd67cf9-prcpn\" (UID: \"f27f5322-b5fc-4c02-ac21-6cf820ce08ce\") " pod="openstack/swift-proxy-65dcd67cf9-prcpn" Feb 16 11:31:39 crc kubenswrapper[4949]: I0216 11:31:39.303064 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f27f5322-b5fc-4c02-ac21-6cf820ce08ce-public-tls-certs\") pod \"swift-proxy-65dcd67cf9-prcpn\" (UID: \"f27f5322-b5fc-4c02-ac21-6cf820ce08ce\") " pod="openstack/swift-proxy-65dcd67cf9-prcpn" Feb 16 11:31:39 crc kubenswrapper[4949]: I0216 11:31:39.309439 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f27f5322-b5fc-4c02-ac21-6cf820ce08ce-log-httpd\") pod \"swift-proxy-65dcd67cf9-prcpn\" (UID: \"f27f5322-b5fc-4c02-ac21-6cf820ce08ce\") " pod="openstack/swift-proxy-65dcd67cf9-prcpn" Feb 16 11:31:39 crc kubenswrapper[4949]: I0216 11:31:39.309771 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f27f5322-b5fc-4c02-ac21-6cf820ce08ce-config-data\") pod \"swift-proxy-65dcd67cf9-prcpn\" (UID: \"f27f5322-b5fc-4c02-ac21-6cf820ce08ce\") " pod="openstack/swift-proxy-65dcd67cf9-prcpn" Feb 16 11:31:39 crc kubenswrapper[4949]: I0216 11:31:39.309994 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f27f5322-b5fc-4c02-ac21-6cf820ce08ce-log-httpd\") pod \"swift-proxy-65dcd67cf9-prcpn\" (UID: \"f27f5322-b5fc-4c02-ac21-6cf820ce08ce\") " pod="openstack/swift-proxy-65dcd67cf9-prcpn" Feb 16 11:31:39 crc kubenswrapper[4949]: I0216 11:31:39.309830 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/f27f5322-b5fc-4c02-ac21-6cf820ce08ce-etc-swift\") pod \"swift-proxy-65dcd67cf9-prcpn\" (UID: \"f27f5322-b5fc-4c02-ac21-6cf820ce08ce\") " pod="openstack/swift-proxy-65dcd67cf9-prcpn" Feb 16 11:31:39 crc kubenswrapper[4949]: I0216 11:31:39.310910 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f27f5322-b5fc-4c02-ac21-6cf820ce08ce-combined-ca-bundle\") pod \"swift-proxy-65dcd67cf9-prcpn\" (UID: \"f27f5322-b5fc-4c02-ac21-6cf820ce08ce\") " pod="openstack/swift-proxy-65dcd67cf9-prcpn" Feb 16 11:31:39 crc kubenswrapper[4949]: I0216 11:31:39.311622 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f27f5322-b5fc-4c02-ac21-6cf820ce08ce-run-httpd\") pod \"swift-proxy-65dcd67cf9-prcpn\" (UID: \"f27f5322-b5fc-4c02-ac21-6cf820ce08ce\") " pod="openstack/swift-proxy-65dcd67cf9-prcpn" Feb 16 11:31:39 crc 
kubenswrapper[4949]: I0216 11:31:39.311880 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f27f5322-b5fc-4c02-ac21-6cf820ce08ce-internal-tls-certs\") pod \"swift-proxy-65dcd67cf9-prcpn\" (UID: \"f27f5322-b5fc-4c02-ac21-6cf820ce08ce\") " pod="openstack/swift-proxy-65dcd67cf9-prcpn" Feb 16 11:31:39 crc kubenswrapper[4949]: I0216 11:31:39.311948 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ksd64\" (UniqueName: \"kubernetes.io/projected/f27f5322-b5fc-4c02-ac21-6cf820ce08ce-kube-api-access-ksd64\") pod \"swift-proxy-65dcd67cf9-prcpn\" (UID: \"f27f5322-b5fc-4c02-ac21-6cf820ce08ce\") " pod="openstack/swift-proxy-65dcd67cf9-prcpn" Feb 16 11:31:39 crc kubenswrapper[4949]: I0216 11:31:39.312119 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f27f5322-b5fc-4c02-ac21-6cf820ce08ce-public-tls-certs\") pod \"swift-proxy-65dcd67cf9-prcpn\" (UID: \"f27f5322-b5fc-4c02-ac21-6cf820ce08ce\") " pod="openstack/swift-proxy-65dcd67cf9-prcpn" Feb 16 11:31:39 crc kubenswrapper[4949]: I0216 11:31:39.313359 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f27f5322-b5fc-4c02-ac21-6cf820ce08ce-run-httpd\") pod \"swift-proxy-65dcd67cf9-prcpn\" (UID: \"f27f5322-b5fc-4c02-ac21-6cf820ce08ce\") " pod="openstack/swift-proxy-65dcd67cf9-prcpn" Feb 16 11:31:39 crc kubenswrapper[4949]: I0216 11:31:39.320266 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f27f5322-b5fc-4c02-ac21-6cf820ce08ce-internal-tls-certs\") pod \"swift-proxy-65dcd67cf9-prcpn\" (UID: \"f27f5322-b5fc-4c02-ac21-6cf820ce08ce\") " pod="openstack/swift-proxy-65dcd67cf9-prcpn" Feb 16 11:31:39 crc kubenswrapper[4949]: I0216 11:31:39.321641 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/f27f5322-b5fc-4c02-ac21-6cf820ce08ce-etc-swift\") pod \"swift-proxy-65dcd67cf9-prcpn\" (UID: \"f27f5322-b5fc-4c02-ac21-6cf820ce08ce\") " pod="openstack/swift-proxy-65dcd67cf9-prcpn" Feb 16 11:31:39 crc kubenswrapper[4949]: I0216 11:31:39.330328 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f27f5322-b5fc-4c02-ac21-6cf820ce08ce-config-data\") pod \"swift-proxy-65dcd67cf9-prcpn\" (UID: \"f27f5322-b5fc-4c02-ac21-6cf820ce08ce\") " pod="openstack/swift-proxy-65dcd67cf9-prcpn" Feb 16 11:31:39 crc kubenswrapper[4949]: I0216 11:31:39.344745 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ksd64\" (UniqueName: \"kubernetes.io/projected/f27f5322-b5fc-4c02-ac21-6cf820ce08ce-kube-api-access-ksd64\") pod \"swift-proxy-65dcd67cf9-prcpn\" (UID: \"f27f5322-b5fc-4c02-ac21-6cf820ce08ce\") " pod="openstack/swift-proxy-65dcd67cf9-prcpn" Feb 16 11:31:39 crc kubenswrapper[4949]: I0216 11:31:39.349854 4949 generic.go:334] "Generic (PLEG): container finished" podID="f2936866-3cf4-417f-b715-a00150d249bb" containerID="5c6dbf926bde95dd6e2f3bc5174fc727cf9849d5b0df9d64eea230113701c611" exitCode=0 Feb 16 11:31:39 crc kubenswrapper[4949]: I0216 11:31:39.349912 4949 generic.go:334] "Generic (PLEG): container finished" podID="f2936866-3cf4-417f-b715-a00150d249bb" containerID="4704d9592b6ee32852b6b3565e05968432225b00681da1dcde794b62c521429a" 
exitCode=2 Feb 16 11:31:39 crc kubenswrapper[4949]: I0216 11:31:39.349925 4949 generic.go:334] "Generic (PLEG): container finished" podID="f2936866-3cf4-417f-b715-a00150d249bb" containerID="a6105c78d1151101f8a7b0f374a2f29d36168a38da75c3583d71623e30a2a6f2" exitCode=0 Feb 16 11:31:39 crc kubenswrapper[4949]: I0216 11:31:39.349916 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f2936866-3cf4-417f-b715-a00150d249bb","Type":"ContainerDied","Data":"5c6dbf926bde95dd6e2f3bc5174fc727cf9849d5b0df9d64eea230113701c611"} Feb 16 11:31:39 crc kubenswrapper[4949]: I0216 11:31:39.350028 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f2936866-3cf4-417f-b715-a00150d249bb","Type":"ContainerDied","Data":"4704d9592b6ee32852b6b3565e05968432225b00681da1dcde794b62c521429a"} Feb 16 11:31:39 crc kubenswrapper[4949]: I0216 11:31:39.350050 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f2936866-3cf4-417f-b715-a00150d249bb","Type":"ContainerDied","Data":"a6105c78d1151101f8a7b0f374a2f29d36168a38da75c3583d71623e30a2a6f2"} Feb 16 11:31:39 crc kubenswrapper[4949]: I0216 11:31:39.427924 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-65dcd67cf9-prcpn" Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.031734 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.134369 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qcb58\" (UniqueName: \"kubernetes.io/projected/f2936866-3cf4-417f-b715-a00150d249bb-kube-api-access-qcb58\") pod \"f2936866-3cf4-417f-b715-a00150d249bb\" (UID: \"f2936866-3cf4-417f-b715-a00150d249bb\") " Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.134460 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f2936866-3cf4-417f-b715-a00150d249bb-sg-core-conf-yaml\") pod \"f2936866-3cf4-417f-b715-a00150d249bb\" (UID: \"f2936866-3cf4-417f-b715-a00150d249bb\") " Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.134659 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f2936866-3cf4-417f-b715-a00150d249bb-log-httpd\") pod \"f2936866-3cf4-417f-b715-a00150d249bb\" (UID: \"f2936866-3cf4-417f-b715-a00150d249bb\") " Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.134746 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2936866-3cf4-417f-b715-a00150d249bb-config-data\") pod \"f2936866-3cf4-417f-b715-a00150d249bb\" (UID: \"f2936866-3cf4-417f-b715-a00150d249bb\") " Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.134825 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f2936866-3cf4-417f-b715-a00150d249bb-combined-ca-bundle\") pod \"f2936866-3cf4-417f-b715-a00150d249bb\" (UID: \"f2936866-3cf4-417f-b715-a00150d249bb\") " Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.134897 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f2936866-3cf4-417f-b715-a00150d249bb-run-httpd\") pod 
\"f2936866-3cf4-417f-b715-a00150d249bb\" (UID: \"f2936866-3cf4-417f-b715-a00150d249bb\") " Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.134949 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f2936866-3cf4-417f-b715-a00150d249bb-scripts\") pod \"f2936866-3cf4-417f-b715-a00150d249bb\" (UID: \"f2936866-3cf4-417f-b715-a00150d249bb\") " Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.137106 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f2936866-3cf4-417f-b715-a00150d249bb-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "f2936866-3cf4-417f-b715-a00150d249bb" (UID: "f2936866-3cf4-417f-b715-a00150d249bb"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.137895 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f2936866-3cf4-417f-b715-a00150d249bb-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "f2936866-3cf4-417f-b715-a00150d249bb" (UID: "f2936866-3cf4-417f-b715-a00150d249bb"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.149441 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f2936866-3cf4-417f-b715-a00150d249bb-scripts" (OuterVolumeSpecName: "scripts") pod "f2936866-3cf4-417f-b715-a00150d249bb" (UID: "f2936866-3cf4-417f-b715-a00150d249bb"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.156366 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f2936866-3cf4-417f-b715-a00150d249bb-kube-api-access-qcb58" (OuterVolumeSpecName: "kube-api-access-qcb58") pod "f2936866-3cf4-417f-b715-a00150d249bb" (UID: "f2936866-3cf4-417f-b715-a00150d249bb"). InnerVolumeSpecName "kube-api-access-qcb58". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.194508 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-65dcd67cf9-prcpn"] Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.204027 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f2936866-3cf4-417f-b715-a00150d249bb-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "f2936866-3cf4-417f-b715-a00150d249bb" (UID: "f2936866-3cf4-417f-b715-a00150d249bb"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:31:40 crc kubenswrapper[4949]: W0216 11:31:40.215689 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf27f5322_b5fc_4c02_ac21_6cf820ce08ce.slice/crio-1ba2fa626336c647bf34208ee1d73f44b167408227a7acd8d70b8b08029de4ac WatchSource:0}: Error finding container 1ba2fa626336c647bf34208ee1d73f44b167408227a7acd8d70b8b08029de4ac: Status 404 returned error can't find the container with id 1ba2fa626336c647bf34208ee1d73f44b167408227a7acd8d70b8b08029de4ac Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.238524 4949 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f2936866-3cf4-417f-b715-a00150d249bb-run-httpd\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.239000 4949 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f2936866-3cf4-417f-b715-a00150d249bb-scripts\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.239141 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qcb58\" (UniqueName: \"kubernetes.io/projected/f2936866-3cf4-417f-b715-a00150d249bb-kube-api-access-qcb58\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.239513 4949 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f2936866-3cf4-417f-b715-a00150d249bb-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.239736 4949 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f2936866-3cf4-417f-b715-a00150d249bb-log-httpd\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.346323 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f2936866-3cf4-417f-b715-a00150d249bb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f2936866-3cf4-417f-b715-a00150d249bb" (UID: "f2936866-3cf4-417f-b715-a00150d249bb"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.349798 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f2936866-3cf4-417f-b715-a00150d249bb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.379536 4949 generic.go:334] "Generic (PLEG): container finished" podID="f2936866-3cf4-417f-b715-a00150d249bb" containerID="df5508fcf10f574d56c7adc75b769e915803c29a12b5da68770ad1bf706f9c23" exitCode=0 Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.379643 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f2936866-3cf4-417f-b715-a00150d249bb","Type":"ContainerDied","Data":"df5508fcf10f574d56c7adc75b769e915803c29a12b5da68770ad1bf706f9c23"} Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.379701 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f2936866-3cf4-417f-b715-a00150d249bb","Type":"ContainerDied","Data":"5626b7b51ad1547488e575954cb1a33e2ba2b158e11d7c0c8fc96cf2a0b07d88"} Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.379726 4949 scope.go:117] "RemoveContainer" containerID="5c6dbf926bde95dd6e2f3bc5174fc727cf9849d5b0df9d64eea230113701c611" Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.379988 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.392603 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-65dcd67cf9-prcpn" event={"ID":"f27f5322-b5fc-4c02-ac21-6cf820ce08ce","Type":"ContainerStarted","Data":"1ba2fa626336c647bf34208ee1d73f44b167408227a7acd8d70b8b08029de4ac"} Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.397623 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f2936866-3cf4-417f-b715-a00150d249bb-config-data" (OuterVolumeSpecName: "config-data") pod "f2936866-3cf4-417f-b715-a00150d249bb" (UID: "f2936866-3cf4-417f-b715-a00150d249bb"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.447377 4949 scope.go:117] "RemoveContainer" containerID="4704d9592b6ee32852b6b3565e05968432225b00681da1dcde794b62c521429a" Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.452581 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2936866-3cf4-417f-b715-a00150d249bb-config-data\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.474328 4949 scope.go:117] "RemoveContainer" containerID="df5508fcf10f574d56c7adc75b769e915803c29a12b5da68770ad1bf706f9c23" Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.505461 4949 scope.go:117] "RemoveContainer" containerID="a6105c78d1151101f8a7b0f374a2f29d36168a38da75c3583d71623e30a2a6f2" Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.549476 4949 scope.go:117] "RemoveContainer" containerID="5c6dbf926bde95dd6e2f3bc5174fc727cf9849d5b0df9d64eea230113701c611" Feb 16 11:31:40 crc kubenswrapper[4949]: E0216 11:31:40.552162 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5c6dbf926bde95dd6e2f3bc5174fc727cf9849d5b0df9d64eea230113701c611\": container with ID starting with 5c6dbf926bde95dd6e2f3bc5174fc727cf9849d5b0df9d64eea230113701c611 not found: ID does not exist" containerID="5c6dbf926bde95dd6e2f3bc5174fc727cf9849d5b0df9d64eea230113701c611" Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.552239 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5c6dbf926bde95dd6e2f3bc5174fc727cf9849d5b0df9d64eea230113701c611"} err="failed to get container status \"5c6dbf926bde95dd6e2f3bc5174fc727cf9849d5b0df9d64eea230113701c611\": rpc error: code = NotFound desc = could not find container \"5c6dbf926bde95dd6e2f3bc5174fc727cf9849d5b0df9d64eea230113701c611\": container with ID starting with 5c6dbf926bde95dd6e2f3bc5174fc727cf9849d5b0df9d64eea230113701c611 not found: ID does not exist" Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.552276 4949 scope.go:117] "RemoveContainer" containerID="4704d9592b6ee32852b6b3565e05968432225b00681da1dcde794b62c521429a" Feb 16 11:31:40 crc kubenswrapper[4949]: E0216 11:31:40.552730 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4704d9592b6ee32852b6b3565e05968432225b00681da1dcde794b62c521429a\": container with ID starting with 4704d9592b6ee32852b6b3565e05968432225b00681da1dcde794b62c521429a not found: ID does not exist" containerID="4704d9592b6ee32852b6b3565e05968432225b00681da1dcde794b62c521429a" Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.552770 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4704d9592b6ee32852b6b3565e05968432225b00681da1dcde794b62c521429a"} err="failed to get container status \"4704d9592b6ee32852b6b3565e05968432225b00681da1dcde794b62c521429a\": rpc error: code = NotFound desc = could not find container \"4704d9592b6ee32852b6b3565e05968432225b00681da1dcde794b62c521429a\": container with ID starting with 4704d9592b6ee32852b6b3565e05968432225b00681da1dcde794b62c521429a not found: ID does not exist" Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.552789 4949 scope.go:117] "RemoveContainer" containerID="df5508fcf10f574d56c7adc75b769e915803c29a12b5da68770ad1bf706f9c23" Feb 16 11:31:40 crc kubenswrapper[4949]: E0216 
11:31:40.553202 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"df5508fcf10f574d56c7adc75b769e915803c29a12b5da68770ad1bf706f9c23\": container with ID starting with df5508fcf10f574d56c7adc75b769e915803c29a12b5da68770ad1bf706f9c23 not found: ID does not exist" containerID="df5508fcf10f574d56c7adc75b769e915803c29a12b5da68770ad1bf706f9c23" Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.553231 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"df5508fcf10f574d56c7adc75b769e915803c29a12b5da68770ad1bf706f9c23"} err="failed to get container status \"df5508fcf10f574d56c7adc75b769e915803c29a12b5da68770ad1bf706f9c23\": rpc error: code = NotFound desc = could not find container \"df5508fcf10f574d56c7adc75b769e915803c29a12b5da68770ad1bf706f9c23\": container with ID starting with df5508fcf10f574d56c7adc75b769e915803c29a12b5da68770ad1bf706f9c23 not found: ID does not exist" Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.553247 4949 scope.go:117] "RemoveContainer" containerID="a6105c78d1151101f8a7b0f374a2f29d36168a38da75c3583d71623e30a2a6f2" Feb 16 11:31:40 crc kubenswrapper[4949]: E0216 11:31:40.554676 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a6105c78d1151101f8a7b0f374a2f29d36168a38da75c3583d71623e30a2a6f2\": container with ID starting with a6105c78d1151101f8a7b0f374a2f29d36168a38da75c3583d71623e30a2a6f2 not found: ID does not exist" containerID="a6105c78d1151101f8a7b0f374a2f29d36168a38da75c3583d71623e30a2a6f2" Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.554720 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a6105c78d1151101f8a7b0f374a2f29d36168a38da75c3583d71623e30a2a6f2"} err="failed to get container status \"a6105c78d1151101f8a7b0f374a2f29d36168a38da75c3583d71623e30a2a6f2\": rpc error: code = NotFound desc = could not find container \"a6105c78d1151101f8a7b0f374a2f29d36168a38da75c3583d71623e30a2a6f2\": container with ID starting with a6105c78d1151101f8a7b0f374a2f29d36168a38da75c3583d71623e30a2a6f2 not found: ID does not exist" Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.812013 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.830978 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.845732 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Feb 16 11:31:40 crc kubenswrapper[4949]: E0216 11:31:40.846447 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f2936866-3cf4-417f-b715-a00150d249bb" containerName="ceilometer-notification-agent" Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.846501 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2936866-3cf4-417f-b715-a00150d249bb" containerName="ceilometer-notification-agent" Feb 16 11:31:40 crc kubenswrapper[4949]: E0216 11:31:40.846519 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f2936866-3cf4-417f-b715-a00150d249bb" containerName="ceilometer-central-agent" Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.846527 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2936866-3cf4-417f-b715-a00150d249bb" containerName="ceilometer-central-agent" Feb 16 11:31:40 crc 
kubenswrapper[4949]: E0216 11:31:40.846548 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f2936866-3cf4-417f-b715-a00150d249bb" containerName="proxy-httpd" Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.846555 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2936866-3cf4-417f-b715-a00150d249bb" containerName="proxy-httpd" Feb 16 11:31:40 crc kubenswrapper[4949]: E0216 11:31:40.846572 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f2936866-3cf4-417f-b715-a00150d249bb" containerName="sg-core" Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.846577 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2936866-3cf4-417f-b715-a00150d249bb" containerName="sg-core" Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.846880 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="f2936866-3cf4-417f-b715-a00150d249bb" containerName="proxy-httpd" Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.846906 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="f2936866-3cf4-417f-b715-a00150d249bb" containerName="sg-core" Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.846919 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="f2936866-3cf4-417f-b715-a00150d249bb" containerName="ceilometer-central-agent" Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.846932 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="f2936866-3cf4-417f-b715-a00150d249bb" containerName="ceilometer-notification-agent" Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.849694 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.852510 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.853329 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.863823 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.968231 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8932af0-5e7c-4715-bdfa-5b84382387d3-config-data\") pod \"ceilometer-0\" (UID: \"e8932af0-5e7c-4715-bdfa-5b84382387d3\") " pod="openstack/ceilometer-0" Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.968371 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e8932af0-5e7c-4715-bdfa-5b84382387d3-log-httpd\") pod \"ceilometer-0\" (UID: \"e8932af0-5e7c-4715-bdfa-5b84382387d3\") " pod="openstack/ceilometer-0" Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.968391 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8932af0-5e7c-4715-bdfa-5b84382387d3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e8932af0-5e7c-4715-bdfa-5b84382387d3\") " pod="openstack/ceilometer-0" Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.968438 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: 
\"kubernetes.io/secret/e8932af0-5e7c-4715-bdfa-5b84382387d3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e8932af0-5e7c-4715-bdfa-5b84382387d3\") " pod="openstack/ceilometer-0" Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.968494 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zdlzp\" (UniqueName: \"kubernetes.io/projected/e8932af0-5e7c-4715-bdfa-5b84382387d3-kube-api-access-zdlzp\") pod \"ceilometer-0\" (UID: \"e8932af0-5e7c-4715-bdfa-5b84382387d3\") " pod="openstack/ceilometer-0" Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.968540 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e8932af0-5e7c-4715-bdfa-5b84382387d3-scripts\") pod \"ceilometer-0\" (UID: \"e8932af0-5e7c-4715-bdfa-5b84382387d3\") " pod="openstack/ceilometer-0" Feb 16 11:31:40 crc kubenswrapper[4949]: I0216 11:31:40.968603 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e8932af0-5e7c-4715-bdfa-5b84382387d3-run-httpd\") pod \"ceilometer-0\" (UID: \"e8932af0-5e7c-4715-bdfa-5b84382387d3\") " pod="openstack/ceilometer-0" Feb 16 11:31:41 crc kubenswrapper[4949]: I0216 11:31:41.070945 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e8932af0-5e7c-4715-bdfa-5b84382387d3-scripts\") pod \"ceilometer-0\" (UID: \"e8932af0-5e7c-4715-bdfa-5b84382387d3\") " pod="openstack/ceilometer-0" Feb 16 11:31:41 crc kubenswrapper[4949]: I0216 11:31:41.071063 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e8932af0-5e7c-4715-bdfa-5b84382387d3-run-httpd\") pod \"ceilometer-0\" (UID: \"e8932af0-5e7c-4715-bdfa-5b84382387d3\") " pod="openstack/ceilometer-0" Feb 16 11:31:41 crc kubenswrapper[4949]: I0216 11:31:41.071126 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8932af0-5e7c-4715-bdfa-5b84382387d3-config-data\") pod \"ceilometer-0\" (UID: \"e8932af0-5e7c-4715-bdfa-5b84382387d3\") " pod="openstack/ceilometer-0" Feb 16 11:31:41 crc kubenswrapper[4949]: I0216 11:31:41.071262 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e8932af0-5e7c-4715-bdfa-5b84382387d3-log-httpd\") pod \"ceilometer-0\" (UID: \"e8932af0-5e7c-4715-bdfa-5b84382387d3\") " pod="openstack/ceilometer-0" Feb 16 11:31:41 crc kubenswrapper[4949]: I0216 11:31:41.071287 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8932af0-5e7c-4715-bdfa-5b84382387d3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e8932af0-5e7c-4715-bdfa-5b84382387d3\") " pod="openstack/ceilometer-0" Feb 16 11:31:41 crc kubenswrapper[4949]: I0216 11:31:41.071333 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e8932af0-5e7c-4715-bdfa-5b84382387d3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e8932af0-5e7c-4715-bdfa-5b84382387d3\") " pod="openstack/ceilometer-0" Feb 16 11:31:41 crc kubenswrapper[4949]: I0216 11:31:41.071387 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-zdlzp\" (UniqueName: \"kubernetes.io/projected/e8932af0-5e7c-4715-bdfa-5b84382387d3-kube-api-access-zdlzp\") pod \"ceilometer-0\" (UID: \"e8932af0-5e7c-4715-bdfa-5b84382387d3\") " pod="openstack/ceilometer-0" Feb 16 11:31:41 crc kubenswrapper[4949]: I0216 11:31:41.086942 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e8932af0-5e7c-4715-bdfa-5b84382387d3-log-httpd\") pod \"ceilometer-0\" (UID: \"e8932af0-5e7c-4715-bdfa-5b84382387d3\") " pod="openstack/ceilometer-0" Feb 16 11:31:41 crc kubenswrapper[4949]: I0216 11:31:41.090454 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e8932af0-5e7c-4715-bdfa-5b84382387d3-scripts\") pod \"ceilometer-0\" (UID: \"e8932af0-5e7c-4715-bdfa-5b84382387d3\") " pod="openstack/ceilometer-0" Feb 16 11:31:41 crc kubenswrapper[4949]: I0216 11:31:41.090814 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8932af0-5e7c-4715-bdfa-5b84382387d3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e8932af0-5e7c-4715-bdfa-5b84382387d3\") " pod="openstack/ceilometer-0" Feb 16 11:31:41 crc kubenswrapper[4949]: I0216 11:31:41.095984 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e8932af0-5e7c-4715-bdfa-5b84382387d3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e8932af0-5e7c-4715-bdfa-5b84382387d3\") " pod="openstack/ceilometer-0" Feb 16 11:31:41 crc kubenswrapper[4949]: I0216 11:31:41.096049 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zdlzp\" (UniqueName: \"kubernetes.io/projected/e8932af0-5e7c-4715-bdfa-5b84382387d3-kube-api-access-zdlzp\") pod \"ceilometer-0\" (UID: \"e8932af0-5e7c-4715-bdfa-5b84382387d3\") " pod="openstack/ceilometer-0" Feb 16 11:31:41 crc kubenswrapper[4949]: I0216 11:31:41.096571 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e8932af0-5e7c-4715-bdfa-5b84382387d3-run-httpd\") pod \"ceilometer-0\" (UID: \"e8932af0-5e7c-4715-bdfa-5b84382387d3\") " pod="openstack/ceilometer-0" Feb 16 11:31:41 crc kubenswrapper[4949]: I0216 11:31:41.098908 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8932af0-5e7c-4715-bdfa-5b84382387d3-config-data\") pod \"ceilometer-0\" (UID: \"e8932af0-5e7c-4715-bdfa-5b84382387d3\") " pod="openstack/ceilometer-0" Feb 16 11:31:41 crc kubenswrapper[4949]: I0216 11:31:41.184968 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 16 11:31:41 crc kubenswrapper[4949]: I0216 11:31:41.258652 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f2936866-3cf4-417f-b715-a00150d249bb" path="/var/lib/kubelet/pods/f2936866-3cf4-417f-b715-a00150d249bb/volumes" Feb 16 11:31:41 crc kubenswrapper[4949]: I0216 11:31:41.438309 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-65dcd67cf9-prcpn" event={"ID":"f27f5322-b5fc-4c02-ac21-6cf820ce08ce","Type":"ContainerStarted","Data":"8173e739e92ef1f60150c116bc254924d5e66758649ffd69a25a3223cc554a8f"} Feb 16 11:31:41 crc kubenswrapper[4949]: I0216 11:31:41.438374 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-65dcd67cf9-prcpn" event={"ID":"f27f5322-b5fc-4c02-ac21-6cf820ce08ce","Type":"ContainerStarted","Data":"23cbbd17fac31353a9055df266fe61a1d3c83aa98e9365e0df3f76244057980e"} Feb 16 11:31:41 crc kubenswrapper[4949]: I0216 11:31:41.441878 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-65dcd67cf9-prcpn" Feb 16 11:31:41 crc kubenswrapper[4949]: I0216 11:31:41.442154 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-65dcd67cf9-prcpn" Feb 16 11:31:41 crc kubenswrapper[4949]: I0216 11:31:41.488095 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-65dcd67cf9-prcpn" podStartSLOduration=3.488068751 podStartE2EDuration="3.488068751s" podCreationTimestamp="2026-02-16 11:31:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:31:41.4684143 +0000 UTC m=+1491.097748465" watchObservedRunningTime="2026-02-16 11:31:41.488068751 +0000 UTC m=+1491.117402936" Feb 16 11:31:41 crc kubenswrapper[4949]: I0216 11:31:41.834418 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 16 11:31:41 crc kubenswrapper[4949]: W0216 11:31:41.837930 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode8932af0_5e7c_4715_bdfa_5b84382387d3.slice/crio-38722a9d34bd46e0c5a3dd45eb6410eb3144e7095733b31dbbc2a905b8e6a1ae WatchSource:0}: Error finding container 38722a9d34bd46e0c5a3dd45eb6410eb3144e7095733b31dbbc2a905b8e6a1ae: Status 404 returned error can't find the container with id 38722a9d34bd46e0c5a3dd45eb6410eb3144e7095733b31dbbc2a905b8e6a1ae Feb 16 11:31:42 crc kubenswrapper[4949]: I0216 11:31:42.485920 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e8932af0-5e7c-4715-bdfa-5b84382387d3","Type":"ContainerStarted","Data":"38722a9d34bd46e0c5a3dd45eb6410eb3144e7095733b31dbbc2a905b8e6a1ae"} Feb 16 11:31:42 crc kubenswrapper[4949]: I0216 11:31:42.900842 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-g76dr" podUID="acc263a1-4f57-4dca-bcc5-5d5388539a5d" containerName="registry-server" probeResult="failure" output=< Feb 16 11:31:42 crc kubenswrapper[4949]: timeout: failed to connect service ":50051" within 1s Feb 16 11:31:42 crc kubenswrapper[4949]: > Feb 16 11:31:42 crc kubenswrapper[4949]: I0216 11:31:42.926293 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Feb 16 11:31:43 crc kubenswrapper[4949]: I0216 11:31:43.503010 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ceilometer-0" event={"ID":"e8932af0-5e7c-4715-bdfa-5b84382387d3","Type":"ContainerStarted","Data":"8243c747a7d69db63a1cf450201f51b1d98077e5b1913f35ef3963a977dce559"} Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.134574 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-c9d48d96d-4b894"] Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.137493 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-c9d48d96d-4b894" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.144924 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-engine-config-data" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.144934 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.145377 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-f55x5" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.178966 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-c9d48d96d-4b894"] Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.251146 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-7dc46f9d6-d9jsn"] Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.257532 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-7dc46f9d6-d9jsn" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.261564 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-cfnapi-config-data" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.269767 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90b83c30-3793-4e05-80d4-3a714ad09404-config-data\") pod \"heat-engine-c9d48d96d-4b894\" (UID: \"90b83c30-3793-4e05-80d4-3a714ad09404\") " pod="openstack/heat-engine-c9d48d96d-4b894" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.269937 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/90b83c30-3793-4e05-80d4-3a714ad09404-config-data-custom\") pod \"heat-engine-c9d48d96d-4b894\" (UID: \"90b83c30-3793-4e05-80d4-3a714ad09404\") " pod="openstack/heat-engine-c9d48d96d-4b894" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.269986 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6w422\" (UniqueName: \"kubernetes.io/projected/90b83c30-3793-4e05-80d4-3a714ad09404-kube-api-access-6w422\") pod \"heat-engine-c9d48d96d-4b894\" (UID: \"90b83c30-3793-4e05-80d4-3a714ad09404\") " pod="openstack/heat-engine-c9d48d96d-4b894" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.270053 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90b83c30-3793-4e05-80d4-3a714ad09404-combined-ca-bundle\") pod \"heat-engine-c9d48d96d-4b894\" (UID: \"90b83c30-3793-4e05-80d4-3a714ad09404\") " pod="openstack/heat-engine-c9d48d96d-4b894" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.281689 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-7dc46f9d6-d9jsn"] Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 
11:31:44.306690 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-f6bc4c6c9-zglfk"] Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.309529 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f6bc4c6c9-zglfk" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.324350 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f6bc4c6c9-zglfk"] Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.373595 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-5db54b7646-q4qzf"] Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.375378 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-5db54b7646-q4qzf" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.376697 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6w422\" (UniqueName: \"kubernetes.io/projected/90b83c30-3793-4e05-80d4-3a714ad09404-kube-api-access-6w422\") pod \"heat-engine-c9d48d96d-4b894\" (UID: \"90b83c30-3793-4e05-80d4-3a714ad09404\") " pod="openstack/heat-engine-c9d48d96d-4b894" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.376735 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cbb85a9a-237f-474a-b7d4-95f06bc44724-combined-ca-bundle\") pod \"heat-cfnapi-7dc46f9d6-d9jsn\" (UID: \"cbb85a9a-237f-474a-b7d4-95f06bc44724\") " pod="openstack/heat-cfnapi-7dc46f9d6-d9jsn" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.376834 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/16d14031-a4a4-4965-a1fc-b385b05df235-ovsdbserver-sb\") pod \"dnsmasq-dns-f6bc4c6c9-zglfk\" (UID: \"16d14031-a4a4-4965-a1fc-b385b05df235\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-zglfk" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.376860 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90b83c30-3793-4e05-80d4-3a714ad09404-combined-ca-bundle\") pod \"heat-engine-c9d48d96d-4b894\" (UID: \"90b83c30-3793-4e05-80d4-3a714ad09404\") " pod="openstack/heat-engine-c9d48d96d-4b894" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.376886 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cbb85a9a-237f-474a-b7d4-95f06bc44724-config-data\") pod \"heat-cfnapi-7dc46f9d6-d9jsn\" (UID: \"cbb85a9a-237f-474a-b7d4-95f06bc44724\") " pod="openstack/heat-cfnapi-7dc46f9d6-d9jsn" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.376907 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/16d14031-a4a4-4965-a1fc-b385b05df235-ovsdbserver-nb\") pod \"dnsmasq-dns-f6bc4c6c9-zglfk\" (UID: \"16d14031-a4a4-4965-a1fc-b385b05df235\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-zglfk" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.376928 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dtqt7\" (UniqueName: \"kubernetes.io/projected/cbb85a9a-237f-474a-b7d4-95f06bc44724-kube-api-access-dtqt7\") pod \"heat-cfnapi-7dc46f9d6-d9jsn\" (UID: 
\"cbb85a9a-237f-474a-b7d4-95f06bc44724\") " pod="openstack/heat-cfnapi-7dc46f9d6-d9jsn" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.376992 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xn7kx\" (UniqueName: \"kubernetes.io/projected/16d14031-a4a4-4965-a1fc-b385b05df235-kube-api-access-xn7kx\") pod \"dnsmasq-dns-f6bc4c6c9-zglfk\" (UID: \"16d14031-a4a4-4965-a1fc-b385b05df235\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-zglfk" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.377037 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/16d14031-a4a4-4965-a1fc-b385b05df235-config\") pod \"dnsmasq-dns-f6bc4c6c9-zglfk\" (UID: \"16d14031-a4a4-4965-a1fc-b385b05df235\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-zglfk" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.377097 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/16d14031-a4a4-4965-a1fc-b385b05df235-dns-svc\") pod \"dnsmasq-dns-f6bc4c6c9-zglfk\" (UID: \"16d14031-a4a4-4965-a1fc-b385b05df235\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-zglfk" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.377117 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90b83c30-3793-4e05-80d4-3a714ad09404-config-data\") pod \"heat-engine-c9d48d96d-4b894\" (UID: \"90b83c30-3793-4e05-80d4-3a714ad09404\") " pod="openstack/heat-engine-c9d48d96d-4b894" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.377155 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/16d14031-a4a4-4965-a1fc-b385b05df235-dns-swift-storage-0\") pod \"dnsmasq-dns-f6bc4c6c9-zglfk\" (UID: \"16d14031-a4a4-4965-a1fc-b385b05df235\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-zglfk" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.377234 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/90b83c30-3793-4e05-80d4-3a714ad09404-config-data-custom\") pod \"heat-engine-c9d48d96d-4b894\" (UID: \"90b83c30-3793-4e05-80d4-3a714ad09404\") " pod="openstack/heat-engine-c9d48d96d-4b894" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.377385 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cbb85a9a-237f-474a-b7d4-95f06bc44724-config-data-custom\") pod \"heat-cfnapi-7dc46f9d6-d9jsn\" (UID: \"cbb85a9a-237f-474a-b7d4-95f06bc44724\") " pod="openstack/heat-cfnapi-7dc46f9d6-d9jsn" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.387542 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/90b83c30-3793-4e05-80d4-3a714ad09404-config-data-custom\") pod \"heat-engine-c9d48d96d-4b894\" (UID: \"90b83c30-3793-4e05-80d4-3a714ad09404\") " pod="openstack/heat-engine-c9d48d96d-4b894" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.393341 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90b83c30-3793-4e05-80d4-3a714ad09404-config-data\") pod \"heat-engine-c9d48d96d-4b894\" (UID: 
\"90b83c30-3793-4e05-80d4-3a714ad09404\") " pod="openstack/heat-engine-c9d48d96d-4b894" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.393677 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-api-config-data" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.404194 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-5db54b7646-q4qzf"] Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.408294 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90b83c30-3793-4e05-80d4-3a714ad09404-combined-ca-bundle\") pod \"heat-engine-c9d48d96d-4b894\" (UID: \"90b83c30-3793-4e05-80d4-3a714ad09404\") " pod="openstack/heat-engine-c9d48d96d-4b894" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.457038 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6w422\" (UniqueName: \"kubernetes.io/projected/90b83c30-3793-4e05-80d4-3a714ad09404-kube-api-access-6w422\") pod \"heat-engine-c9d48d96d-4b894\" (UID: \"90b83c30-3793-4e05-80d4-3a714ad09404\") " pod="openstack/heat-engine-c9d48d96d-4b894" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.486253 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/16d14031-a4a4-4965-a1fc-b385b05df235-ovsdbserver-sb\") pod \"dnsmasq-dns-f6bc4c6c9-zglfk\" (UID: \"16d14031-a4a4-4965-a1fc-b385b05df235\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-zglfk" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.486326 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cbb85a9a-237f-474a-b7d4-95f06bc44724-config-data\") pod \"heat-cfnapi-7dc46f9d6-d9jsn\" (UID: \"cbb85a9a-237f-474a-b7d4-95f06bc44724\") " pod="openstack/heat-cfnapi-7dc46f9d6-d9jsn" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.486351 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/16d14031-a4a4-4965-a1fc-b385b05df235-ovsdbserver-nb\") pod \"dnsmasq-dns-f6bc4c6c9-zglfk\" (UID: \"16d14031-a4a4-4965-a1fc-b385b05df235\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-zglfk" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.486379 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dtqt7\" (UniqueName: \"kubernetes.io/projected/cbb85a9a-237f-474a-b7d4-95f06bc44724-kube-api-access-dtqt7\") pod \"heat-cfnapi-7dc46f9d6-d9jsn\" (UID: \"cbb85a9a-237f-474a-b7d4-95f06bc44724\") " pod="openstack/heat-cfnapi-7dc46f9d6-d9jsn" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.486476 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xn7kx\" (UniqueName: \"kubernetes.io/projected/16d14031-a4a4-4965-a1fc-b385b05df235-kube-api-access-xn7kx\") pod \"dnsmasq-dns-f6bc4c6c9-zglfk\" (UID: \"16d14031-a4a4-4965-a1fc-b385b05df235\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-zglfk" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.486542 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/16d14031-a4a4-4965-a1fc-b385b05df235-config\") pod \"dnsmasq-dns-f6bc4c6c9-zglfk\" (UID: \"16d14031-a4a4-4965-a1fc-b385b05df235\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-zglfk" Feb 16 11:31:44 crc 
kubenswrapper[4949]: I0216 11:31:44.486629 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/16d14031-a4a4-4965-a1fc-b385b05df235-dns-svc\") pod \"dnsmasq-dns-f6bc4c6c9-zglfk\" (UID: \"16d14031-a4a4-4965-a1fc-b385b05df235\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-zglfk" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.486691 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/16d14031-a4a4-4965-a1fc-b385b05df235-dns-swift-storage-0\") pod \"dnsmasq-dns-f6bc4c6c9-zglfk\" (UID: \"16d14031-a4a4-4965-a1fc-b385b05df235\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-zglfk" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.486712 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d67acfe2-211f-45ab-b9ef-de039be62b4f-combined-ca-bundle\") pod \"heat-api-5db54b7646-q4qzf\" (UID: \"d67acfe2-211f-45ab-b9ef-de039be62b4f\") " pod="openstack/heat-api-5db54b7646-q4qzf" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.486746 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d67acfe2-211f-45ab-b9ef-de039be62b4f-config-data\") pod \"heat-api-5db54b7646-q4qzf\" (UID: \"d67acfe2-211f-45ab-b9ef-de039be62b4f\") " pod="openstack/heat-api-5db54b7646-q4qzf" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.486786 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-74vnw\" (UniqueName: \"kubernetes.io/projected/d67acfe2-211f-45ab-b9ef-de039be62b4f-kube-api-access-74vnw\") pod \"heat-api-5db54b7646-q4qzf\" (UID: \"d67acfe2-211f-45ab-b9ef-de039be62b4f\") " pod="openstack/heat-api-5db54b7646-q4qzf" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.486878 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cbb85a9a-237f-474a-b7d4-95f06bc44724-config-data-custom\") pod \"heat-cfnapi-7dc46f9d6-d9jsn\" (UID: \"cbb85a9a-237f-474a-b7d4-95f06bc44724\") " pod="openstack/heat-cfnapi-7dc46f9d6-d9jsn" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.486895 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d67acfe2-211f-45ab-b9ef-de039be62b4f-config-data-custom\") pod \"heat-api-5db54b7646-q4qzf\" (UID: \"d67acfe2-211f-45ab-b9ef-de039be62b4f\") " pod="openstack/heat-api-5db54b7646-q4qzf" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.487013 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cbb85a9a-237f-474a-b7d4-95f06bc44724-combined-ca-bundle\") pod \"heat-cfnapi-7dc46f9d6-d9jsn\" (UID: \"cbb85a9a-237f-474a-b7d4-95f06bc44724\") " pod="openstack/heat-cfnapi-7dc46f9d6-d9jsn" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.489038 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/16d14031-a4a4-4965-a1fc-b385b05df235-dns-swift-storage-0\") pod \"dnsmasq-dns-f6bc4c6c9-zglfk\" (UID: \"16d14031-a4a4-4965-a1fc-b385b05df235\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-zglfk" Feb 16 
11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.489738 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/16d14031-a4a4-4965-a1fc-b385b05df235-ovsdbserver-nb\") pod \"dnsmasq-dns-f6bc4c6c9-zglfk\" (UID: \"16d14031-a4a4-4965-a1fc-b385b05df235\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-zglfk" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.490313 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/16d14031-a4a4-4965-a1fc-b385b05df235-ovsdbserver-sb\") pod \"dnsmasq-dns-f6bc4c6c9-zglfk\" (UID: \"16d14031-a4a4-4965-a1fc-b385b05df235\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-zglfk" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.490907 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/16d14031-a4a4-4965-a1fc-b385b05df235-dns-svc\") pod \"dnsmasq-dns-f6bc4c6c9-zglfk\" (UID: \"16d14031-a4a4-4965-a1fc-b385b05df235\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-zglfk" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.491966 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/16d14031-a4a4-4965-a1fc-b385b05df235-config\") pod \"dnsmasq-dns-f6bc4c6c9-zglfk\" (UID: \"16d14031-a4a4-4965-a1fc-b385b05df235\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-zglfk" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.508739 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cbb85a9a-237f-474a-b7d4-95f06bc44724-combined-ca-bundle\") pod \"heat-cfnapi-7dc46f9d6-d9jsn\" (UID: \"cbb85a9a-237f-474a-b7d4-95f06bc44724\") " pod="openstack/heat-cfnapi-7dc46f9d6-d9jsn" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.509695 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-c9d48d96d-4b894" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.517189 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cbb85a9a-237f-474a-b7d4-95f06bc44724-config-data\") pod \"heat-cfnapi-7dc46f9d6-d9jsn\" (UID: \"cbb85a9a-237f-474a-b7d4-95f06bc44724\") " pod="openstack/heat-cfnapi-7dc46f9d6-d9jsn" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.522991 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cbb85a9a-237f-474a-b7d4-95f06bc44724-config-data-custom\") pod \"heat-cfnapi-7dc46f9d6-d9jsn\" (UID: \"cbb85a9a-237f-474a-b7d4-95f06bc44724\") " pod="openstack/heat-cfnapi-7dc46f9d6-d9jsn" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.535385 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dtqt7\" (UniqueName: \"kubernetes.io/projected/cbb85a9a-237f-474a-b7d4-95f06bc44724-kube-api-access-dtqt7\") pod \"heat-cfnapi-7dc46f9d6-d9jsn\" (UID: \"cbb85a9a-237f-474a-b7d4-95f06bc44724\") " pod="openstack/heat-cfnapi-7dc46f9d6-d9jsn" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.550057 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xn7kx\" (UniqueName: \"kubernetes.io/projected/16d14031-a4a4-4965-a1fc-b385b05df235-kube-api-access-xn7kx\") pod \"dnsmasq-dns-f6bc4c6c9-zglfk\" (UID: \"16d14031-a4a4-4965-a1fc-b385b05df235\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-zglfk" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.589252 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d67acfe2-211f-45ab-b9ef-de039be62b4f-combined-ca-bundle\") pod \"heat-api-5db54b7646-q4qzf\" (UID: \"d67acfe2-211f-45ab-b9ef-de039be62b4f\") " pod="openstack/heat-api-5db54b7646-q4qzf" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.589331 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d67acfe2-211f-45ab-b9ef-de039be62b4f-config-data\") pod \"heat-api-5db54b7646-q4qzf\" (UID: \"d67acfe2-211f-45ab-b9ef-de039be62b4f\") " pod="openstack/heat-api-5db54b7646-q4qzf" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.589386 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-74vnw\" (UniqueName: \"kubernetes.io/projected/d67acfe2-211f-45ab-b9ef-de039be62b4f-kube-api-access-74vnw\") pod \"heat-api-5db54b7646-q4qzf\" (UID: \"d67acfe2-211f-45ab-b9ef-de039be62b4f\") " pod="openstack/heat-api-5db54b7646-q4qzf" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.589457 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d67acfe2-211f-45ab-b9ef-de039be62b4f-config-data-custom\") pod \"heat-api-5db54b7646-q4qzf\" (UID: \"d67acfe2-211f-45ab-b9ef-de039be62b4f\") " pod="openstack/heat-api-5db54b7646-q4qzf" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.597304 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d67acfe2-211f-45ab-b9ef-de039be62b4f-config-data\") pod \"heat-api-5db54b7646-q4qzf\" (UID: \"d67acfe2-211f-45ab-b9ef-de039be62b4f\") " pod="openstack/heat-api-5db54b7646-q4qzf" Feb 16 11:31:44 crc 
kubenswrapper[4949]: I0216 11:31:44.599325 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d67acfe2-211f-45ab-b9ef-de039be62b4f-combined-ca-bundle\") pod \"heat-api-5db54b7646-q4qzf\" (UID: \"d67acfe2-211f-45ab-b9ef-de039be62b4f\") " pod="openstack/heat-api-5db54b7646-q4qzf" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.599995 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d67acfe2-211f-45ab-b9ef-de039be62b4f-config-data-custom\") pod \"heat-api-5db54b7646-q4qzf\" (UID: \"d67acfe2-211f-45ab-b9ef-de039be62b4f\") " pod="openstack/heat-api-5db54b7646-q4qzf" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.600649 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-7dc46f9d6-d9jsn" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.625639 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-74vnw\" (UniqueName: \"kubernetes.io/projected/d67acfe2-211f-45ab-b9ef-de039be62b4f-kube-api-access-74vnw\") pod \"heat-api-5db54b7646-q4qzf\" (UID: \"d67acfe2-211f-45ab-b9ef-de039be62b4f\") " pod="openstack/heat-api-5db54b7646-q4qzf" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.658463 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f6bc4c6c9-zglfk" Feb 16 11:31:44 crc kubenswrapper[4949]: I0216 11:31:44.715854 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-5db54b7646-q4qzf" Feb 16 11:31:48 crc kubenswrapper[4949]: I0216 11:31:48.336818 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 16 11:31:48 crc kubenswrapper[4949]: I0216 11:31:48.392227 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-cff99c4df-6j2pt" Feb 16 11:31:48 crc kubenswrapper[4949]: I0216 11:31:48.490365 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-7f6ff59f84-j4tfn"] Feb 16 11:31:48 crc kubenswrapper[4949]: I0216 11:31:48.490702 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-7f6ff59f84-j4tfn" podUID="4f44c7bf-e0a8-4250-b513-147e6e88718d" containerName="neutron-api" containerID="cri-o://093358e6b634260643a762f42b27824e10a12260cc9d2a87fa7d0d7057ca5c34" gracePeriod=30 Feb 16 11:31:48 crc kubenswrapper[4949]: I0216 11:31:48.490857 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-7f6ff59f84-j4tfn" podUID="4f44c7bf-e0a8-4250-b513-147e6e88718d" containerName="neutron-httpd" containerID="cri-o://e5c79f6a615745bee1f21cdaaa659128533899b6ee7d10afd6a96d6843731d3a" gracePeriod=30 Feb 16 11:31:49 crc kubenswrapper[4949]: I0216 11:31:49.452708 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-65dcd67cf9-prcpn" Feb 16 11:31:49 crc kubenswrapper[4949]: I0216 11:31:49.455163 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-65dcd67cf9-prcpn" Feb 16 11:31:49 crc kubenswrapper[4949]: I0216 11:31:49.696075 4949 generic.go:334] "Generic (PLEG): container finished" podID="4f44c7bf-e0a8-4250-b513-147e6e88718d" containerID="e5c79f6a615745bee1f21cdaaa659128533899b6ee7d10afd6a96d6843731d3a" exitCode=0 Feb 16 11:31:49 crc kubenswrapper[4949]: I0216 
11:31:49.697517 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7f6ff59f84-j4tfn" event={"ID":"4f44c7bf-e0a8-4250-b513-147e6e88718d","Type":"ContainerDied","Data":"e5c79f6a615745bee1f21cdaaa659128533899b6ee7d10afd6a96d6843731d3a"} Feb 16 11:31:51 crc kubenswrapper[4949]: I0216 11:31:51.340037 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-7b8694cbb9-kdjtx"] Feb 16 11:31:51 crc kubenswrapper[4949]: I0216 11:31:51.346462 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-7b8694cbb9-kdjtx" Feb 16 11:31:51 crc kubenswrapper[4949]: I0216 11:31:51.358961 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-5d4bdcc77b-tmvps"] Feb 16 11:31:51 crc kubenswrapper[4949]: I0216 11:31:51.363031 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-5d4bdcc77b-tmvps" Feb 16 11:31:51 crc kubenswrapper[4949]: I0216 11:31:51.398095 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-7b8694cbb9-kdjtx"] Feb 16 11:31:51 crc kubenswrapper[4949]: I0216 11:31:51.427698 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-5d4bdcc77b-tmvps"] Feb 16 11:31:51 crc kubenswrapper[4949]: I0216 11:31:51.453045 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-5489d8946b-q8mgt"] Feb 16 11:31:51 crc kubenswrapper[4949]: I0216 11:31:51.461191 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-5489d8946b-q8mgt" Feb 16 11:31:51 crc kubenswrapper[4949]: I0216 11:31:51.486525 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-5489d8946b-q8mgt"] Feb 16 11:31:51 crc kubenswrapper[4949]: I0216 11:31:51.528491 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53ab577c-be20-45fa-9c92-52524f44c90a-combined-ca-bundle\") pod \"heat-engine-7b8694cbb9-kdjtx\" (UID: \"53ab577c-be20-45fa-9c92-52524f44c90a\") " pod="openstack/heat-engine-7b8694cbb9-kdjtx" Feb 16 11:31:51 crc kubenswrapper[4949]: I0216 11:31:51.528564 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/53ab577c-be20-45fa-9c92-52524f44c90a-config-data-custom\") pod \"heat-engine-7b8694cbb9-kdjtx\" (UID: \"53ab577c-be20-45fa-9c92-52524f44c90a\") " pod="openstack/heat-engine-7b8694cbb9-kdjtx" Feb 16 11:31:51 crc kubenswrapper[4949]: I0216 11:31:51.528631 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5wdms\" (UniqueName: \"kubernetes.io/projected/c53f95c1-224b-4026-9e51-9a2677621865-kube-api-access-5wdms\") pod \"heat-api-5d4bdcc77b-tmvps\" (UID: \"c53f95c1-224b-4026-9e51-9a2677621865\") " pod="openstack/heat-api-5d4bdcc77b-tmvps" Feb 16 11:31:51 crc kubenswrapper[4949]: I0216 11:31:51.528695 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c53f95c1-224b-4026-9e51-9a2677621865-combined-ca-bundle\") pod \"heat-api-5d4bdcc77b-tmvps\" (UID: \"c53f95c1-224b-4026-9e51-9a2677621865\") " pod="openstack/heat-api-5d4bdcc77b-tmvps" Feb 16 11:31:51 crc kubenswrapper[4949]: I0216 11:31:51.528970 4949 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c53f95c1-224b-4026-9e51-9a2677621865-config-data\") pod \"heat-api-5d4bdcc77b-tmvps\" (UID: \"c53f95c1-224b-4026-9e51-9a2677621865\") " pod="openstack/heat-api-5d4bdcc77b-tmvps" Feb 16 11:31:51 crc kubenswrapper[4949]: I0216 11:31:51.529152 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53ab577c-be20-45fa-9c92-52524f44c90a-config-data\") pod \"heat-engine-7b8694cbb9-kdjtx\" (UID: \"53ab577c-be20-45fa-9c92-52524f44c90a\") " pod="openstack/heat-engine-7b8694cbb9-kdjtx" Feb 16 11:31:51 crc kubenswrapper[4949]: I0216 11:31:51.529212 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n59t8\" (UniqueName: \"kubernetes.io/projected/53ab577c-be20-45fa-9c92-52524f44c90a-kube-api-access-n59t8\") pod \"heat-engine-7b8694cbb9-kdjtx\" (UID: \"53ab577c-be20-45fa-9c92-52524f44c90a\") " pod="openstack/heat-engine-7b8694cbb9-kdjtx" Feb 16 11:31:51 crc kubenswrapper[4949]: I0216 11:31:51.529232 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c53f95c1-224b-4026-9e51-9a2677621865-config-data-custom\") pod \"heat-api-5d4bdcc77b-tmvps\" (UID: \"c53f95c1-224b-4026-9e51-9a2677621865\") " pod="openstack/heat-api-5d4bdcc77b-tmvps" Feb 16 11:31:51 crc kubenswrapper[4949]: I0216 11:31:51.631534 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c53f95c1-224b-4026-9e51-9a2677621865-config-data\") pod \"heat-api-5d4bdcc77b-tmvps\" (UID: \"c53f95c1-224b-4026-9e51-9a2677621865\") " pod="openstack/heat-api-5d4bdcc77b-tmvps" Feb 16 11:31:51 crc kubenswrapper[4949]: I0216 11:31:51.631630 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53ab577c-be20-45fa-9c92-52524f44c90a-config-data\") pod \"heat-engine-7b8694cbb9-kdjtx\" (UID: \"53ab577c-be20-45fa-9c92-52524f44c90a\") " pod="openstack/heat-engine-7b8694cbb9-kdjtx" Feb 16 11:31:51 crc kubenswrapper[4949]: I0216 11:31:51.631659 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n59t8\" (UniqueName: \"kubernetes.io/projected/53ab577c-be20-45fa-9c92-52524f44c90a-kube-api-access-n59t8\") pod \"heat-engine-7b8694cbb9-kdjtx\" (UID: \"53ab577c-be20-45fa-9c92-52524f44c90a\") " pod="openstack/heat-engine-7b8694cbb9-kdjtx" Feb 16 11:31:51 crc kubenswrapper[4949]: I0216 11:31:51.631681 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c53f95c1-224b-4026-9e51-9a2677621865-config-data-custom\") pod \"heat-api-5d4bdcc77b-tmvps\" (UID: \"c53f95c1-224b-4026-9e51-9a2677621865\") " pod="openstack/heat-api-5d4bdcc77b-tmvps" Feb 16 11:31:51 crc kubenswrapper[4949]: I0216 11:31:51.631741 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xmsv8\" (UniqueName: \"kubernetes.io/projected/e4998389-d4f4-44f0-b048-988b96e27acc-kube-api-access-xmsv8\") pod \"heat-cfnapi-5489d8946b-q8mgt\" (UID: \"e4998389-d4f4-44f0-b048-988b96e27acc\") " pod="openstack/heat-cfnapi-5489d8946b-q8mgt" Feb 16 11:31:51 crc kubenswrapper[4949]: I0216 
11:31:51.631771 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4998389-d4f4-44f0-b048-988b96e27acc-config-data\") pod \"heat-cfnapi-5489d8946b-q8mgt\" (UID: \"e4998389-d4f4-44f0-b048-988b96e27acc\") " pod="openstack/heat-cfnapi-5489d8946b-q8mgt" Feb 16 11:31:51 crc kubenswrapper[4949]: I0216 11:31:51.631830 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53ab577c-be20-45fa-9c92-52524f44c90a-combined-ca-bundle\") pod \"heat-engine-7b8694cbb9-kdjtx\" (UID: \"53ab577c-be20-45fa-9c92-52524f44c90a\") " pod="openstack/heat-engine-7b8694cbb9-kdjtx" Feb 16 11:31:51 crc kubenswrapper[4949]: I0216 11:31:51.631881 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4998389-d4f4-44f0-b048-988b96e27acc-combined-ca-bundle\") pod \"heat-cfnapi-5489d8946b-q8mgt\" (UID: \"e4998389-d4f4-44f0-b048-988b96e27acc\") " pod="openstack/heat-cfnapi-5489d8946b-q8mgt" Feb 16 11:31:51 crc kubenswrapper[4949]: I0216 11:31:51.631902 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/53ab577c-be20-45fa-9c92-52524f44c90a-config-data-custom\") pod \"heat-engine-7b8694cbb9-kdjtx\" (UID: \"53ab577c-be20-45fa-9c92-52524f44c90a\") " pod="openstack/heat-engine-7b8694cbb9-kdjtx" Feb 16 11:31:51 crc kubenswrapper[4949]: I0216 11:31:51.631944 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5wdms\" (UniqueName: \"kubernetes.io/projected/c53f95c1-224b-4026-9e51-9a2677621865-kube-api-access-5wdms\") pod \"heat-api-5d4bdcc77b-tmvps\" (UID: \"c53f95c1-224b-4026-9e51-9a2677621865\") " pod="openstack/heat-api-5d4bdcc77b-tmvps" Feb 16 11:31:51 crc kubenswrapper[4949]: I0216 11:31:51.632005 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c53f95c1-224b-4026-9e51-9a2677621865-combined-ca-bundle\") pod \"heat-api-5d4bdcc77b-tmvps\" (UID: \"c53f95c1-224b-4026-9e51-9a2677621865\") " pod="openstack/heat-api-5d4bdcc77b-tmvps" Feb 16 11:31:51 crc kubenswrapper[4949]: I0216 11:31:51.632035 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e4998389-d4f4-44f0-b048-988b96e27acc-config-data-custom\") pod \"heat-cfnapi-5489d8946b-q8mgt\" (UID: \"e4998389-d4f4-44f0-b048-988b96e27acc\") " pod="openstack/heat-cfnapi-5489d8946b-q8mgt" Feb 16 11:31:51 crc kubenswrapper[4949]: I0216 11:31:51.643312 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53ab577c-be20-45fa-9c92-52524f44c90a-config-data\") pod \"heat-engine-7b8694cbb9-kdjtx\" (UID: \"53ab577c-be20-45fa-9c92-52524f44c90a\") " pod="openstack/heat-engine-7b8694cbb9-kdjtx" Feb 16 11:31:51 crc kubenswrapper[4949]: I0216 11:31:51.647241 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c53f95c1-224b-4026-9e51-9a2677621865-config-data-custom\") pod \"heat-api-5d4bdcc77b-tmvps\" (UID: \"c53f95c1-224b-4026-9e51-9a2677621865\") " pod="openstack/heat-api-5d4bdcc77b-tmvps" Feb 16 11:31:51 crc kubenswrapper[4949]: I0216 
11:31:51.647561 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c53f95c1-224b-4026-9e51-9a2677621865-combined-ca-bundle\") pod \"heat-api-5d4bdcc77b-tmvps\" (UID: \"c53f95c1-224b-4026-9e51-9a2677621865\") " pod="openstack/heat-api-5d4bdcc77b-tmvps" Feb 16 11:31:51 crc kubenswrapper[4949]: I0216 11:31:51.651135 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53ab577c-be20-45fa-9c92-52524f44c90a-combined-ca-bundle\") pod \"heat-engine-7b8694cbb9-kdjtx\" (UID: \"53ab577c-be20-45fa-9c92-52524f44c90a\") " pod="openstack/heat-engine-7b8694cbb9-kdjtx" Feb 16 11:31:51 crc kubenswrapper[4949]: I0216 11:31:51.654762 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/53ab577c-be20-45fa-9c92-52524f44c90a-config-data-custom\") pod \"heat-engine-7b8694cbb9-kdjtx\" (UID: \"53ab577c-be20-45fa-9c92-52524f44c90a\") " pod="openstack/heat-engine-7b8694cbb9-kdjtx" Feb 16 11:31:51 crc kubenswrapper[4949]: I0216 11:31:51.656404 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c53f95c1-224b-4026-9e51-9a2677621865-config-data\") pod \"heat-api-5d4bdcc77b-tmvps\" (UID: \"c53f95c1-224b-4026-9e51-9a2677621865\") " pod="openstack/heat-api-5d4bdcc77b-tmvps" Feb 16 11:31:51 crc kubenswrapper[4949]: I0216 11:31:51.659930 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n59t8\" (UniqueName: \"kubernetes.io/projected/53ab577c-be20-45fa-9c92-52524f44c90a-kube-api-access-n59t8\") pod \"heat-engine-7b8694cbb9-kdjtx\" (UID: \"53ab577c-be20-45fa-9c92-52524f44c90a\") " pod="openstack/heat-engine-7b8694cbb9-kdjtx" Feb 16 11:31:51 crc kubenswrapper[4949]: I0216 11:31:51.671319 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5wdms\" (UniqueName: \"kubernetes.io/projected/c53f95c1-224b-4026-9e51-9a2677621865-kube-api-access-5wdms\") pod \"heat-api-5d4bdcc77b-tmvps\" (UID: \"c53f95c1-224b-4026-9e51-9a2677621865\") " pod="openstack/heat-api-5d4bdcc77b-tmvps" Feb 16 11:31:51 crc kubenswrapper[4949]: I0216 11:31:51.716235 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-7b8694cbb9-kdjtx" Feb 16 11:31:51 crc kubenswrapper[4949]: I0216 11:31:51.734938 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xmsv8\" (UniqueName: \"kubernetes.io/projected/e4998389-d4f4-44f0-b048-988b96e27acc-kube-api-access-xmsv8\") pod \"heat-cfnapi-5489d8946b-q8mgt\" (UID: \"e4998389-d4f4-44f0-b048-988b96e27acc\") " pod="openstack/heat-cfnapi-5489d8946b-q8mgt" Feb 16 11:31:51 crc kubenswrapper[4949]: I0216 11:31:51.735005 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4998389-d4f4-44f0-b048-988b96e27acc-config-data\") pod \"heat-cfnapi-5489d8946b-q8mgt\" (UID: \"e4998389-d4f4-44f0-b048-988b96e27acc\") " pod="openstack/heat-cfnapi-5489d8946b-q8mgt" Feb 16 11:31:51 crc kubenswrapper[4949]: I0216 11:31:51.735113 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4998389-d4f4-44f0-b048-988b96e27acc-combined-ca-bundle\") pod \"heat-cfnapi-5489d8946b-q8mgt\" (UID: \"e4998389-d4f4-44f0-b048-988b96e27acc\") " pod="openstack/heat-cfnapi-5489d8946b-q8mgt" Feb 16 11:31:51 crc kubenswrapper[4949]: I0216 11:31:51.735265 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e4998389-d4f4-44f0-b048-988b96e27acc-config-data-custom\") pod \"heat-cfnapi-5489d8946b-q8mgt\" (UID: \"e4998389-d4f4-44f0-b048-988b96e27acc\") " pod="openstack/heat-cfnapi-5489d8946b-q8mgt" Feb 16 11:31:51 crc kubenswrapper[4949]: I0216 11:31:51.747052 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-5d4bdcc77b-tmvps" Feb 16 11:31:51 crc kubenswrapper[4949]: I0216 11:31:51.755018 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4998389-d4f4-44f0-b048-988b96e27acc-config-data\") pod \"heat-cfnapi-5489d8946b-q8mgt\" (UID: \"e4998389-d4f4-44f0-b048-988b96e27acc\") " pod="openstack/heat-cfnapi-5489d8946b-q8mgt" Feb 16 11:31:51 crc kubenswrapper[4949]: I0216 11:31:51.758348 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4998389-d4f4-44f0-b048-988b96e27acc-combined-ca-bundle\") pod \"heat-cfnapi-5489d8946b-q8mgt\" (UID: \"e4998389-d4f4-44f0-b048-988b96e27acc\") " pod="openstack/heat-cfnapi-5489d8946b-q8mgt" Feb 16 11:31:51 crc kubenswrapper[4949]: I0216 11:31:51.761109 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xmsv8\" (UniqueName: \"kubernetes.io/projected/e4998389-d4f4-44f0-b048-988b96e27acc-kube-api-access-xmsv8\") pod \"heat-cfnapi-5489d8946b-q8mgt\" (UID: \"e4998389-d4f4-44f0-b048-988b96e27acc\") " pod="openstack/heat-cfnapi-5489d8946b-q8mgt" Feb 16 11:31:51 crc kubenswrapper[4949]: I0216 11:31:51.761541 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e4998389-d4f4-44f0-b048-988b96e27acc-config-data-custom\") pod \"heat-cfnapi-5489d8946b-q8mgt\" (UID: \"e4998389-d4f4-44f0-b048-988b96e27acc\") " pod="openstack/heat-cfnapi-5489d8946b-q8mgt" Feb 16 11:31:51 crc kubenswrapper[4949]: I0216 11:31:51.801525 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-5489d8946b-q8mgt" Feb 16 11:31:52 crc kubenswrapper[4949]: I0216 11:31:52.053361 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-g76dr" Feb 16 11:31:52 crc kubenswrapper[4949]: I0216 11:31:52.130381 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-g76dr" Feb 16 11:31:52 crc kubenswrapper[4949]: I0216 11:31:52.319767 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-g76dr"] Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.163543 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-7dc46f9d6-d9jsn"] Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.201055 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-5db54b7646-q4qzf"] Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.234256 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-566c9b565f-fv7vz"] Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.242476 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-566c9b565f-fv7vz" Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.251956 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-cfnapi-internal-svc" Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.252344 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-cfnapi-public-svc" Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.376849 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-5cdc74fffc-n2hcr"] Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.383865 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-566c9b565f-fv7vz"] Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.384217 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-5cdc74fffc-n2hcr"] Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.384429 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-5cdc74fffc-n2hcr" Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.393284 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-api-internal-svc" Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.398029 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-api-public-svc" Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.454652 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c2936fd0-97ae-4028-a1b7-27feb4919790-public-tls-certs\") pod \"heat-cfnapi-566c9b565f-fv7vz\" (UID: \"c2936fd0-97ae-4028-a1b7-27feb4919790\") " pod="openstack/heat-cfnapi-566c9b565f-fv7vz" Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.454776 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c2936fd0-97ae-4028-a1b7-27feb4919790-config-data-custom\") pod \"heat-cfnapi-566c9b565f-fv7vz\" (UID: \"c2936fd0-97ae-4028-a1b7-27feb4919790\") " pod="openstack/heat-cfnapi-566c9b565f-fv7vz" Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.454931 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-48zgf\" (UniqueName: \"kubernetes.io/projected/c2936fd0-97ae-4028-a1b7-27feb4919790-kube-api-access-48zgf\") pod \"heat-cfnapi-566c9b565f-fv7vz\" (UID: \"c2936fd0-97ae-4028-a1b7-27feb4919790\") " pod="openstack/heat-cfnapi-566c9b565f-fv7vz" Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.455061 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2936fd0-97ae-4028-a1b7-27feb4919790-config-data\") pod \"heat-cfnapi-566c9b565f-fv7vz\" (UID: \"c2936fd0-97ae-4028-a1b7-27feb4919790\") " pod="openstack/heat-cfnapi-566c9b565f-fv7vz" Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.455081 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2936fd0-97ae-4028-a1b7-27feb4919790-combined-ca-bundle\") pod \"heat-cfnapi-566c9b565f-fv7vz\" (UID: \"c2936fd0-97ae-4028-a1b7-27feb4919790\") " pod="openstack/heat-cfnapi-566c9b565f-fv7vz" Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.455106 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c2936fd0-97ae-4028-a1b7-27feb4919790-internal-tls-certs\") pod \"heat-cfnapi-566c9b565f-fv7vz\" (UID: \"c2936fd0-97ae-4028-a1b7-27feb4919790\") " pod="openstack/heat-cfnapi-566c9b565f-fv7vz" Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.563531 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2936fd0-97ae-4028-a1b7-27feb4919790-config-data\") pod \"heat-cfnapi-566c9b565f-fv7vz\" (UID: \"c2936fd0-97ae-4028-a1b7-27feb4919790\") " pod="openstack/heat-cfnapi-566c9b565f-fv7vz" Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.563851 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ch4jw\" (UniqueName: \"kubernetes.io/projected/ecc029c7-0e9a-4211-8632-56c7b5e1b179-kube-api-access-ch4jw\") pod 
\"heat-api-5cdc74fffc-n2hcr\" (UID: \"ecc029c7-0e9a-4211-8632-56c7b5e1b179\") " pod="openstack/heat-api-5cdc74fffc-n2hcr" Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.563876 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2936fd0-97ae-4028-a1b7-27feb4919790-combined-ca-bundle\") pod \"heat-cfnapi-566c9b565f-fv7vz\" (UID: \"c2936fd0-97ae-4028-a1b7-27feb4919790\") " pod="openstack/heat-cfnapi-566c9b565f-fv7vz" Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.563905 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c2936fd0-97ae-4028-a1b7-27feb4919790-internal-tls-certs\") pod \"heat-cfnapi-566c9b565f-fv7vz\" (UID: \"c2936fd0-97ae-4028-a1b7-27feb4919790\") " pod="openstack/heat-cfnapi-566c9b565f-fv7vz" Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.564136 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ecc029c7-0e9a-4211-8632-56c7b5e1b179-combined-ca-bundle\") pod \"heat-api-5cdc74fffc-n2hcr\" (UID: \"ecc029c7-0e9a-4211-8632-56c7b5e1b179\") " pod="openstack/heat-api-5cdc74fffc-n2hcr" Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.564259 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ecc029c7-0e9a-4211-8632-56c7b5e1b179-public-tls-certs\") pod \"heat-api-5cdc74fffc-n2hcr\" (UID: \"ecc029c7-0e9a-4211-8632-56c7b5e1b179\") " pod="openstack/heat-api-5cdc74fffc-n2hcr" Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.564366 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c2936fd0-97ae-4028-a1b7-27feb4919790-public-tls-certs\") pod \"heat-cfnapi-566c9b565f-fv7vz\" (UID: \"c2936fd0-97ae-4028-a1b7-27feb4919790\") " pod="openstack/heat-cfnapi-566c9b565f-fv7vz" Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.564441 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ecc029c7-0e9a-4211-8632-56c7b5e1b179-internal-tls-certs\") pod \"heat-api-5cdc74fffc-n2hcr\" (UID: \"ecc029c7-0e9a-4211-8632-56c7b5e1b179\") " pod="openstack/heat-api-5cdc74fffc-n2hcr" Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.564463 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c2936fd0-97ae-4028-a1b7-27feb4919790-config-data-custom\") pod \"heat-cfnapi-566c9b565f-fv7vz\" (UID: \"c2936fd0-97ae-4028-a1b7-27feb4919790\") " pod="openstack/heat-cfnapi-566c9b565f-fv7vz" Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.564529 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ecc029c7-0e9a-4211-8632-56c7b5e1b179-config-data\") pod \"heat-api-5cdc74fffc-n2hcr\" (UID: \"ecc029c7-0e9a-4211-8632-56c7b5e1b179\") " pod="openstack/heat-api-5cdc74fffc-n2hcr" Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.564596 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/ecc029c7-0e9a-4211-8632-56c7b5e1b179-config-data-custom\") pod \"heat-api-5cdc74fffc-n2hcr\" (UID: \"ecc029c7-0e9a-4211-8632-56c7b5e1b179\") " pod="openstack/heat-api-5cdc74fffc-n2hcr" Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.564739 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-48zgf\" (UniqueName: \"kubernetes.io/projected/c2936fd0-97ae-4028-a1b7-27feb4919790-kube-api-access-48zgf\") pod \"heat-cfnapi-566c9b565f-fv7vz\" (UID: \"c2936fd0-97ae-4028-a1b7-27feb4919790\") " pod="openstack/heat-cfnapi-566c9b565f-fv7vz" Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.593902 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c2936fd0-97ae-4028-a1b7-27feb4919790-public-tls-certs\") pod \"heat-cfnapi-566c9b565f-fv7vz\" (UID: \"c2936fd0-97ae-4028-a1b7-27feb4919790\") " pod="openstack/heat-cfnapi-566c9b565f-fv7vz" Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.593979 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-48zgf\" (UniqueName: \"kubernetes.io/projected/c2936fd0-97ae-4028-a1b7-27feb4919790-kube-api-access-48zgf\") pod \"heat-cfnapi-566c9b565f-fv7vz\" (UID: \"c2936fd0-97ae-4028-a1b7-27feb4919790\") " pod="openstack/heat-cfnapi-566c9b565f-fv7vz" Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.594617 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c2936fd0-97ae-4028-a1b7-27feb4919790-config-data-custom\") pod \"heat-cfnapi-566c9b565f-fv7vz\" (UID: \"c2936fd0-97ae-4028-a1b7-27feb4919790\") " pod="openstack/heat-cfnapi-566c9b565f-fv7vz" Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.595140 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2936fd0-97ae-4028-a1b7-27feb4919790-config-data\") pod \"heat-cfnapi-566c9b565f-fv7vz\" (UID: \"c2936fd0-97ae-4028-a1b7-27feb4919790\") " pod="openstack/heat-cfnapi-566c9b565f-fv7vz" Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.599398 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c2936fd0-97ae-4028-a1b7-27feb4919790-internal-tls-certs\") pod \"heat-cfnapi-566c9b565f-fv7vz\" (UID: \"c2936fd0-97ae-4028-a1b7-27feb4919790\") " pod="openstack/heat-cfnapi-566c9b565f-fv7vz" Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.600250 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2936fd0-97ae-4028-a1b7-27feb4919790-combined-ca-bundle\") pod \"heat-cfnapi-566c9b565f-fv7vz\" (UID: \"c2936fd0-97ae-4028-a1b7-27feb4919790\") " pod="openstack/heat-cfnapi-566c9b565f-fv7vz" Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.668835 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ecc029c7-0e9a-4211-8632-56c7b5e1b179-config-data\") pod \"heat-api-5cdc74fffc-n2hcr\" (UID: \"ecc029c7-0e9a-4211-8632-56c7b5e1b179\") " pod="openstack/heat-api-5cdc74fffc-n2hcr" Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.668922 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ecc029c7-0e9a-4211-8632-56c7b5e1b179-config-data-custom\") 
pod \"heat-api-5cdc74fffc-n2hcr\" (UID: \"ecc029c7-0e9a-4211-8632-56c7b5e1b179\") " pod="openstack/heat-api-5cdc74fffc-n2hcr" Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.669052 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ch4jw\" (UniqueName: \"kubernetes.io/projected/ecc029c7-0e9a-4211-8632-56c7b5e1b179-kube-api-access-ch4jw\") pod \"heat-api-5cdc74fffc-n2hcr\" (UID: \"ecc029c7-0e9a-4211-8632-56c7b5e1b179\") " pod="openstack/heat-api-5cdc74fffc-n2hcr" Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.669111 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ecc029c7-0e9a-4211-8632-56c7b5e1b179-combined-ca-bundle\") pod \"heat-api-5cdc74fffc-n2hcr\" (UID: \"ecc029c7-0e9a-4211-8632-56c7b5e1b179\") " pod="openstack/heat-api-5cdc74fffc-n2hcr" Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.669202 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ecc029c7-0e9a-4211-8632-56c7b5e1b179-public-tls-certs\") pod \"heat-api-5cdc74fffc-n2hcr\" (UID: \"ecc029c7-0e9a-4211-8632-56c7b5e1b179\") " pod="openstack/heat-api-5cdc74fffc-n2hcr" Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.669320 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ecc029c7-0e9a-4211-8632-56c7b5e1b179-internal-tls-certs\") pod \"heat-api-5cdc74fffc-n2hcr\" (UID: \"ecc029c7-0e9a-4211-8632-56c7b5e1b179\") " pod="openstack/heat-api-5cdc74fffc-n2hcr" Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.675629 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ecc029c7-0e9a-4211-8632-56c7b5e1b179-config-data\") pod \"heat-api-5cdc74fffc-n2hcr\" (UID: \"ecc029c7-0e9a-4211-8632-56c7b5e1b179\") " pod="openstack/heat-api-5cdc74fffc-n2hcr" Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.677504 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ecc029c7-0e9a-4211-8632-56c7b5e1b179-combined-ca-bundle\") pod \"heat-api-5cdc74fffc-n2hcr\" (UID: \"ecc029c7-0e9a-4211-8632-56c7b5e1b179\") " pod="openstack/heat-api-5cdc74fffc-n2hcr" Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.681501 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ecc029c7-0e9a-4211-8632-56c7b5e1b179-public-tls-certs\") pod \"heat-api-5cdc74fffc-n2hcr\" (UID: \"ecc029c7-0e9a-4211-8632-56c7b5e1b179\") " pod="openstack/heat-api-5cdc74fffc-n2hcr" Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.681924 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ecc029c7-0e9a-4211-8632-56c7b5e1b179-internal-tls-certs\") pod \"heat-api-5cdc74fffc-n2hcr\" (UID: \"ecc029c7-0e9a-4211-8632-56c7b5e1b179\") " pod="openstack/heat-api-5cdc74fffc-n2hcr" Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.686921 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ecc029c7-0e9a-4211-8632-56c7b5e1b179-config-data-custom\") pod \"heat-api-5cdc74fffc-n2hcr\" (UID: \"ecc029c7-0e9a-4211-8632-56c7b5e1b179\") " 
pod="openstack/heat-api-5cdc74fffc-n2hcr" Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.688105 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-566c9b565f-fv7vz" Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.697033 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ch4jw\" (UniqueName: \"kubernetes.io/projected/ecc029c7-0e9a-4211-8632-56c7b5e1b179-kube-api-access-ch4jw\") pod \"heat-api-5cdc74fffc-n2hcr\" (UID: \"ecc029c7-0e9a-4211-8632-56c7b5e1b179\") " pod="openstack/heat-api-5cdc74fffc-n2hcr" Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.771322 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-5cdc74fffc-n2hcr" Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.818778 4949 generic.go:334] "Generic (PLEG): container finished" podID="4f44c7bf-e0a8-4250-b513-147e6e88718d" containerID="093358e6b634260643a762f42b27824e10a12260cc9d2a87fa7d0d7057ca5c34" exitCode=0 Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.818904 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7f6ff59f84-j4tfn" event={"ID":"4f44c7bf-e0a8-4250-b513-147e6e88718d","Type":"ContainerDied","Data":"093358e6b634260643a762f42b27824e10a12260cc9d2a87fa7d0d7057ca5c34"} Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.843583 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e8932af0-5e7c-4715-bdfa-5b84382387d3","Type":"ContainerStarted","Data":"404aa4f187586988a1d8116851358debcbe5512f1358f52befa7d86132c1771d"} Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.874837 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"fea9f5ef-1f12-446a-ab82-50631b44a37f","Type":"ContainerStarted","Data":"113f72b0799d714af32a8b0c974e92297548ee90e91d363bcf5c84c6639f0aad"} Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.874911 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-g76dr" podUID="acc263a1-4f57-4dca-bcc5-5d5388539a5d" containerName="registry-server" containerID="cri-o://4ffb6c751bef37d00aac44d4842949bcbf1991d78f3befd1670ea34e72a639b9" gracePeriod=2 Feb 16 11:31:53 crc kubenswrapper[4949]: I0216 11:31:53.953985 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.677597053 podStartE2EDuration="18.95395405s" podCreationTimestamp="2026-02-16 11:31:35 +0000 UTC" firstStartedPulling="2026-02-16 11:31:36.601409467 +0000 UTC m=+1486.230743632" lastFinishedPulling="2026-02-16 11:31:52.877766464 +0000 UTC m=+1502.507100629" observedRunningTime="2026-02-16 11:31:53.919439965 +0000 UTC m=+1503.548774160" watchObservedRunningTime="2026-02-16 11:31:53.95395405 +0000 UTC m=+1503.583288215" Feb 16 11:31:54 crc kubenswrapper[4949]: I0216 11:31:54.225367 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-5d4bdcc77b-tmvps"] Feb 16 11:31:54 crc kubenswrapper[4949]: I0216 11:31:54.269223 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-c9d48d96d-4b894"] Feb 16 11:31:54 crc kubenswrapper[4949]: W0216 11:31:54.273562 4949 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod90b83c30_3793_4e05_80d4_3a714ad09404.slice/crio-ea9aa67ca6d786c3ee39098ed21c5dd3ddfdb62d4fb96fb9892db9dde7fac11c WatchSource:0}: Error finding container ea9aa67ca6d786c3ee39098ed21c5dd3ddfdb62d4fb96fb9892db9dde7fac11c: Status 404 returned error can't find the container with id ea9aa67ca6d786c3ee39098ed21c5dd3ddfdb62d4fb96fb9892db9dde7fac11c Feb 16 11:31:54 crc kubenswrapper[4949]: I0216 11:31:54.539153 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7f6ff59f84-j4tfn" Feb 16 11:31:54 crc kubenswrapper[4949]: I0216 11:31:54.634538 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/4f44c7bf-e0a8-4250-b513-147e6e88718d-httpd-config\") pod \"4f44c7bf-e0a8-4250-b513-147e6e88718d\" (UID: \"4f44c7bf-e0a8-4250-b513-147e6e88718d\") " Feb 16 11:31:54 crc kubenswrapper[4949]: I0216 11:31:54.634894 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4f44c7bf-e0a8-4250-b513-147e6e88718d-ovndb-tls-certs\") pod \"4f44c7bf-e0a8-4250-b513-147e6e88718d\" (UID: \"4f44c7bf-e0a8-4250-b513-147e6e88718d\") " Feb 16 11:31:54 crc kubenswrapper[4949]: I0216 11:31:54.634924 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f44c7bf-e0a8-4250-b513-147e6e88718d-combined-ca-bundle\") pod \"4f44c7bf-e0a8-4250-b513-147e6e88718d\" (UID: \"4f44c7bf-e0a8-4250-b513-147e6e88718d\") " Feb 16 11:31:54 crc kubenswrapper[4949]: I0216 11:31:54.635007 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gjv9q\" (UniqueName: \"kubernetes.io/projected/4f44c7bf-e0a8-4250-b513-147e6e88718d-kube-api-access-gjv9q\") pod \"4f44c7bf-e0a8-4250-b513-147e6e88718d\" (UID: \"4f44c7bf-e0a8-4250-b513-147e6e88718d\") " Feb 16 11:31:54 crc kubenswrapper[4949]: I0216 11:31:54.635076 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/4f44c7bf-e0a8-4250-b513-147e6e88718d-config\") pod \"4f44c7bf-e0a8-4250-b513-147e6e88718d\" (UID: \"4f44c7bf-e0a8-4250-b513-147e6e88718d\") " Feb 16 11:31:54 crc kubenswrapper[4949]: I0216 11:31:54.640326 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4f44c7bf-e0a8-4250-b513-147e6e88718d-kube-api-access-gjv9q" (OuterVolumeSpecName: "kube-api-access-gjv9q") pod "4f44c7bf-e0a8-4250-b513-147e6e88718d" (UID: "4f44c7bf-e0a8-4250-b513-147e6e88718d"). InnerVolumeSpecName "kube-api-access-gjv9q". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:31:54 crc kubenswrapper[4949]: I0216 11:31:54.640342 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f44c7bf-e0a8-4250-b513-147e6e88718d-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "4f44c7bf-e0a8-4250-b513-147e6e88718d" (UID: "4f44c7bf-e0a8-4250-b513-147e6e88718d"). InnerVolumeSpecName "httpd-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:31:54 crc kubenswrapper[4949]: I0216 11:31:54.735390 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f44c7bf-e0a8-4250-b513-147e6e88718d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4f44c7bf-e0a8-4250-b513-147e6e88718d" (UID: "4f44c7bf-e0a8-4250-b513-147e6e88718d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:31:54 crc kubenswrapper[4949]: I0216 11:31:54.742879 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f44c7bf-e0a8-4250-b513-147e6e88718d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:54 crc kubenswrapper[4949]: I0216 11:31:54.742920 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gjv9q\" (UniqueName: \"kubernetes.io/projected/4f44c7bf-e0a8-4250-b513-147e6e88718d-kube-api-access-gjv9q\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:54 crc kubenswrapper[4949]: I0216 11:31:54.742932 4949 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/4f44c7bf-e0a8-4250-b513-147e6e88718d-httpd-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:54 crc kubenswrapper[4949]: I0216 11:31:54.751580 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-7dc46f9d6-d9jsn"] Feb 16 11:31:54 crc kubenswrapper[4949]: I0216 11:31:54.768650 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-7b8694cbb9-kdjtx"] Feb 16 11:31:54 crc kubenswrapper[4949]: W0216 11:31:54.769185 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod53ab577c_be20_45fa_9c92_52524f44c90a.slice/crio-b521e48ca7886281469cdce15483832d3047bc0e34c331ecf3e4ffb6cf5cff83 WatchSource:0}: Error finding container b521e48ca7886281469cdce15483832d3047bc0e34c331ecf3e4ffb6cf5cff83: Status 404 returned error can't find the container with id b521e48ca7886281469cdce15483832d3047bc0e34c331ecf3e4ffb6cf5cff83 Feb 16 11:31:54 crc kubenswrapper[4949]: I0216 11:31:54.771251 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f44c7bf-e0a8-4250-b513-147e6e88718d-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "4f44c7bf-e0a8-4250-b513-147e6e88718d" (UID: "4f44c7bf-e0a8-4250-b513-147e6e88718d"). InnerVolumeSpecName "ovndb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:31:54 crc kubenswrapper[4949]: I0216 11:31:54.781031 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-5489d8946b-q8mgt"] Feb 16 11:31:54 crc kubenswrapper[4949]: W0216 11:31:54.784888 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode4998389_d4f4_44f0_b048_988b96e27acc.slice/crio-a727c4d866f78125d4f3a29b3f1533841a9284c1167f972f49cfe7e34f518847 WatchSource:0}: Error finding container a727c4d866f78125d4f3a29b3f1533841a9284c1167f972f49cfe7e34f518847: Status 404 returned error can't find the container with id a727c4d866f78125d4f3a29b3f1533841a9284c1167f972f49cfe7e34f518847 Feb 16 11:31:54 crc kubenswrapper[4949]: I0216 11:31:54.848309 4949 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4f44c7bf-e0a8-4250-b513-147e6e88718d-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:54 crc kubenswrapper[4949]: I0216 11:31:54.901932 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f44c7bf-e0a8-4250-b513-147e6e88718d-config" (OuterVolumeSpecName: "config") pod "4f44c7bf-e0a8-4250-b513-147e6e88718d" (UID: "4f44c7bf-e0a8-4250-b513-147e6e88718d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:31:54 crc kubenswrapper[4949]: I0216 11:31:54.936108 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7dc46f9d6-d9jsn" event={"ID":"cbb85a9a-237f-474a-b7d4-95f06bc44724","Type":"ContainerStarted","Data":"e4252a65cbc88b3bcc579e7b17a624a80c58e68af3e1e8861119fc9f72dd7871"} Feb 16 11:31:54 crc kubenswrapper[4949]: I0216 11:31:54.955375 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-7b8694cbb9-kdjtx" event={"ID":"53ab577c-be20-45fa-9c92-52524f44c90a","Type":"ContainerStarted","Data":"b521e48ca7886281469cdce15483832d3047bc0e34c331ecf3e4ffb6cf5cff83"} Feb 16 11:31:54 crc kubenswrapper[4949]: I0216 11:31:54.955803 4949 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/4f44c7bf-e0a8-4250-b513-147e6e88718d-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:54 crc kubenswrapper[4949]: I0216 11:31:54.967543 4949 generic.go:334] "Generic (PLEG): container finished" podID="acc263a1-4f57-4dca-bcc5-5d5388539a5d" containerID="4ffb6c751bef37d00aac44d4842949bcbf1991d78f3befd1670ea34e72a639b9" exitCode=0 Feb 16 11:31:54 crc kubenswrapper[4949]: I0216 11:31:54.967619 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g76dr" event={"ID":"acc263a1-4f57-4dca-bcc5-5d5388539a5d","Type":"ContainerDied","Data":"4ffb6c751bef37d00aac44d4842949bcbf1991d78f3befd1670ea34e72a639b9"} Feb 16 11:31:54 crc kubenswrapper[4949]: I0216 11:31:54.972487 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e8932af0-5e7c-4715-bdfa-5b84382387d3","Type":"ContainerStarted","Data":"e37ba63457e430f59c62f0dd857c91021e6ab55bd140165e5e96537db87dbc32"} Feb 16 11:31:54 crc kubenswrapper[4949]: I0216 11:31:54.976673 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-5489d8946b-q8mgt" event={"ID":"e4998389-d4f4-44f0-b048-988b96e27acc","Type":"ContainerStarted","Data":"a727c4d866f78125d4f3a29b3f1533841a9284c1167f972f49cfe7e34f518847"} Feb 16 11:31:54 crc kubenswrapper[4949]: I0216 
Feb 16 11:31:54 crc kubenswrapper[4949]: I0216 11:31:54.990529 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5d4bdcc77b-tmvps" event={"ID":"c53f95c1-224b-4026-9e51-9a2677621865","Type":"ContainerStarted","Data":"8ccbcbad02cbf5527c493251eeb13a65658a5ea6c64c69cfef5cad7b0f649092"} Feb 16 11:31:54 crc kubenswrapper[4949]: I0216 11:31:54.995346 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-c9d48d96d-4b894" event={"ID":"90b83c30-3793-4e05-80d4-3a714ad09404","Type":"ContainerStarted","Data":"65360a1c96f37c12ea19eb83787de3ae89e75a16081c24720bcd2c6e45ce6aab"} Feb 16 11:31:54 crc kubenswrapper[4949]: I0216 11:31:54.995399 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-c9d48d96d-4b894" event={"ID":"90b83c30-3793-4e05-80d4-3a714ad09404","Type":"ContainerStarted","Data":"ea9aa67ca6d786c3ee39098ed21c5dd3ddfdb62d4fb96fb9892db9dde7fac11c"} Feb 16 11:31:54 crc kubenswrapper[4949]: I0216 11:31:54.996725 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-c9d48d96d-4b894" Feb 16 11:31:55 crc kubenswrapper[4949]: I0216 11:31:55.004145 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7f6ff59f84-j4tfn" Feb 16 11:31:55 crc kubenswrapper[4949]: I0216 11:31:55.008925 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7f6ff59f84-j4tfn" event={"ID":"4f44c7bf-e0a8-4250-b513-147e6e88718d","Type":"ContainerDied","Data":"fce5e9dea96f7b3de20adb34f3889488a5b616398a3402408b95e3a510892a06"} Feb 16 11:31:55 crc kubenswrapper[4949]: I0216 11:31:55.009000 4949 scope.go:117] "RemoveContainer" containerID="e5c79f6a615745bee1f21cdaaa659128533899b6ee7d10afd6a96d6843731d3a" Feb 16 11:31:55 crc kubenswrapper[4949]: I0216 11:31:55.026476 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-c9d48d96d-4b894" podStartSLOduration=11.026454531 podStartE2EDuration="11.026454531s" podCreationTimestamp="2026-02-16 11:31:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:31:55.022876759 +0000 UTC m=+1504.652210924" watchObservedRunningTime="2026-02-16 11:31:55.026454531 +0000 UTC m=+1504.655788696" Feb 16 11:31:55 crc kubenswrapper[4949]: I0216 11:31:55.045022 4949 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openshift-marketplace/redhat-operators-g76dr" Feb 16 11:31:55 crc kubenswrapper[4949]: I0216 11:31:55.125194 4949 scope.go:117] "RemoveContainer" containerID="093358e6b634260643a762f42b27824e10a12260cc9d2a87fa7d0d7057ca5c34" Feb 16 11:31:55 crc kubenswrapper[4949]: I0216 11:31:55.128205 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-7f6ff59f84-j4tfn"] Feb 16 11:31:55 crc kubenswrapper[4949]: I0216 11:31:55.165674 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/acc263a1-4f57-4dca-bcc5-5d5388539a5d-utilities\") pod \"acc263a1-4f57-4dca-bcc5-5d5388539a5d\" (UID: \"acc263a1-4f57-4dca-bcc5-5d5388539a5d\") " Feb 16 11:31:55 crc kubenswrapper[4949]: I0216 11:31:55.165987 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/acc263a1-4f57-4dca-bcc5-5d5388539a5d-catalog-content\") pod \"acc263a1-4f57-4dca-bcc5-5d5388539a5d\" (UID: \"acc263a1-4f57-4dca-bcc5-5d5388539a5d\") " Feb 16 11:31:55 crc kubenswrapper[4949]: I0216 11:31:55.166098 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-49lzw\" (UniqueName: \"kubernetes.io/projected/acc263a1-4f57-4dca-bcc5-5d5388539a5d-kube-api-access-49lzw\") pod \"acc263a1-4f57-4dca-bcc5-5d5388539a5d\" (UID: \"acc263a1-4f57-4dca-bcc5-5d5388539a5d\") " Feb 16 11:31:55 crc kubenswrapper[4949]: I0216 11:31:55.174426 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/acc263a1-4f57-4dca-bcc5-5d5388539a5d-kube-api-access-49lzw" (OuterVolumeSpecName: "kube-api-access-49lzw") pod "acc263a1-4f57-4dca-bcc5-5d5388539a5d" (UID: "acc263a1-4f57-4dca-bcc5-5d5388539a5d"). InnerVolumeSpecName "kube-api-access-49lzw". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:31:55 crc kubenswrapper[4949]: I0216 11:31:55.175922 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/acc263a1-4f57-4dca-bcc5-5d5388539a5d-utilities" (OuterVolumeSpecName: "utilities") pod "acc263a1-4f57-4dca-bcc5-5d5388539a5d" (UID: "acc263a1-4f57-4dca-bcc5-5d5388539a5d"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:31:55 crc kubenswrapper[4949]: I0216 11:31:55.177703 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-49lzw\" (UniqueName: \"kubernetes.io/projected/acc263a1-4f57-4dca-bcc5-5d5388539a5d-kube-api-access-49lzw\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:55 crc kubenswrapper[4949]: I0216 11:31:55.177731 4949 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/acc263a1-4f57-4dca-bcc5-5d5388539a5d-utilities\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:55 crc kubenswrapper[4949]: I0216 11:31:55.194404 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-7f6ff59f84-j4tfn"] Feb 16 11:31:55 crc kubenswrapper[4949]: I0216 11:31:55.217600 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f6bc4c6c9-zglfk"] Feb 16 11:31:55 crc kubenswrapper[4949]: I0216 11:31:55.258451 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4f44c7bf-e0a8-4250-b513-147e6e88718d" path="/var/lib/kubelet/pods/4f44c7bf-e0a8-4250-b513-147e6e88718d/volumes" Feb 16 11:31:55 crc kubenswrapper[4949]: I0216 11:31:55.302606 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-5db54b7646-q4qzf"] Feb 16 11:31:55 crc kubenswrapper[4949]: I0216 11:31:55.304963 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/acc263a1-4f57-4dca-bcc5-5d5388539a5d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "acc263a1-4f57-4dca-bcc5-5d5388539a5d" (UID: "acc263a1-4f57-4dca-bcc5-5d5388539a5d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:31:55 crc kubenswrapper[4949]: I0216 11:31:55.364885 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-566c9b565f-fv7vz"] Feb 16 11:31:55 crc kubenswrapper[4949]: I0216 11:31:55.379785 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-5cdc74fffc-n2hcr"] Feb 16 11:31:55 crc kubenswrapper[4949]: I0216 11:31:55.387816 4949 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/acc263a1-4f57-4dca-bcc5-5d5388539a5d-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 16 11:31:56 crc kubenswrapper[4949]: I0216 11:31:56.024993 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e8932af0-5e7c-4715-bdfa-5b84382387d3","Type":"ContainerStarted","Data":"920daf415e560c1a54aa341701a7ea5943c37886c9cba278aee3a690764e4594"} Feb 16 11:31:56 crc kubenswrapper[4949]: I0216 11:31:56.025525 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Feb 16 11:31:56 crc kubenswrapper[4949]: I0216 11:31:56.025358 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e8932af0-5e7c-4715-bdfa-5b84382387d3" containerName="proxy-httpd" containerID="cri-o://920daf415e560c1a54aa341701a7ea5943c37886c9cba278aee3a690764e4594" gracePeriod=30 Feb 16 11:31:56 crc kubenswrapper[4949]: I0216 11:31:56.025207 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e8932af0-5e7c-4715-bdfa-5b84382387d3" containerName="ceilometer-central-agent" containerID="cri-o://8243c747a7d69db63a1cf450201f51b1d98077e5b1913f35ef3963a977dce559" gracePeriod=30 Feb 16 11:31:56 crc 
kubenswrapper[4949]: I0216 11:31:56.025430 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e8932af0-5e7c-4715-bdfa-5b84382387d3" containerName="sg-core" containerID="cri-o://e37ba63457e430f59c62f0dd857c91021e6ab55bd140165e5e96537db87dbc32" gracePeriod=30 Feb 16 11:31:56 crc kubenswrapper[4949]: I0216 11:31:56.025412 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e8932af0-5e7c-4715-bdfa-5b84382387d3" containerName="ceilometer-notification-agent" containerID="cri-o://404aa4f187586988a1d8116851358debcbe5512f1358f52befa7d86132c1771d" gracePeriod=30 Feb 16 11:31:56 crc kubenswrapper[4949]: I0216 11:31:56.033148 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-7b8694cbb9-kdjtx" event={"ID":"53ab577c-be20-45fa-9c92-52524f44c90a","Type":"ContainerStarted","Data":"3ea2f7532cb34c2e48b127c60ec98860ee0915109225d11a99fa8d360af999b3"} Feb 16 11:31:56 crc kubenswrapper[4949]: I0216 11:31:56.033268 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-7b8694cbb9-kdjtx" Feb 16 11:31:56 crc kubenswrapper[4949]: I0216 11:31:56.036985 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-566c9b565f-fv7vz" event={"ID":"c2936fd0-97ae-4028-a1b7-27feb4919790","Type":"ContainerStarted","Data":"d396820cfcdff35b6e55b14c79bcf39218c8ea7654f9d8494034e64685a2ec50"} Feb 16 11:31:56 crc kubenswrapper[4949]: I0216 11:31:56.045950 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g76dr" event={"ID":"acc263a1-4f57-4dca-bcc5-5d5388539a5d","Type":"ContainerDied","Data":"29ce604a72cd129b181271139de9b71a0233562f5fab2e134b087d9c42b19b10"} Feb 16 11:31:56 crc kubenswrapper[4949]: I0216 11:31:56.046011 4949 scope.go:117] "RemoveContainer" containerID="4ffb6c751bef37d00aac44d4842949bcbf1991d78f3befd1670ea34e72a639b9" Feb 16 11:31:56 crc kubenswrapper[4949]: I0216 11:31:56.046045 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-g76dr" Feb 16 11:31:56 crc kubenswrapper[4949]: I0216 11:31:56.067497 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5db54b7646-q4qzf" event={"ID":"d67acfe2-211f-45ab-b9ef-de039be62b4f","Type":"ContainerStarted","Data":"5194196f3e7a23a197b7f35169f5e54f0e0929cf9591ae7f50818257b3b0fee0"} Feb 16 11:31:56 crc kubenswrapper[4949]: I0216 11:31:56.075882 4949 generic.go:334] "Generic (PLEG): container finished" podID="16d14031-a4a4-4965-a1fc-b385b05df235" containerID="699c9d2b2b02f2f410da90de2db4fc5edcb2890d9288751a6f41863f330e7dc8" exitCode=0 Feb 16 11:31:56 crc kubenswrapper[4949]: I0216 11:31:56.076262 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f6bc4c6c9-zglfk" event={"ID":"16d14031-a4a4-4965-a1fc-b385b05df235","Type":"ContainerDied","Data":"699c9d2b2b02f2f410da90de2db4fc5edcb2890d9288751a6f41863f330e7dc8"} Feb 16 11:31:56 crc kubenswrapper[4949]: I0216 11:31:56.076324 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f6bc4c6c9-zglfk" event={"ID":"16d14031-a4a4-4965-a1fc-b385b05df235","Type":"ContainerStarted","Data":"3f9c414e86e6fee300c25143da51b40d71575a509327b4307ec421fb1489891d"} Feb 16 11:31:56 crc kubenswrapper[4949]: I0216 11:31:56.081769 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5cdc74fffc-n2hcr" event={"ID":"ecc029c7-0e9a-4211-8632-56c7b5e1b179","Type":"ContainerStarted","Data":"03fc66f99ca4b544315621bc3e04d6888675035f10c5c22a56d78a0d9b75c328"} Feb 16 11:31:56 crc kubenswrapper[4949]: I0216 11:31:56.087311 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.620616076 podStartE2EDuration="16.08728899s" podCreationTimestamp="2026-02-16 11:31:40 +0000 UTC" firstStartedPulling="2026-02-16 11:31:41.843264789 +0000 UTC m=+1491.472598964" lastFinishedPulling="2026-02-16 11:31:55.309937713 +0000 UTC m=+1504.939271878" observedRunningTime="2026-02-16 11:31:56.074746762 +0000 UTC m=+1505.704080937" watchObservedRunningTime="2026-02-16 11:31:56.08728899 +0000 UTC m=+1505.716623155" Feb 16 11:31:56 crc kubenswrapper[4949]: I0216 11:31:56.109558 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-g76dr"] Feb 16 11:31:56 crc kubenswrapper[4949]: I0216 11:31:56.121955 4949 scope.go:117] "RemoveContainer" containerID="92a955296f684feb4a5a1638bbfdec008357a86f1fc56d0721f036a71a5088dd" Feb 16 11:31:56 crc kubenswrapper[4949]: I0216 11:31:56.126304 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-g76dr"] Feb 16 11:31:56 crc kubenswrapper[4949]: I0216 11:31:56.131801 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-7b8694cbb9-kdjtx" podStartSLOduration=5.131766799 podStartE2EDuration="5.131766799s" podCreationTimestamp="2026-02-16 11:31:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:31:56.109665969 +0000 UTC m=+1505.739000154" watchObservedRunningTime="2026-02-16 11:31:56.131766799 +0000 UTC m=+1505.761100964" Feb 16 11:31:57 crc kubenswrapper[4949]: I0216 11:31:57.112776 4949 generic.go:334] "Generic (PLEG): container finished" podID="e8932af0-5e7c-4715-bdfa-5b84382387d3" containerID="e37ba63457e430f59c62f0dd857c91021e6ab55bd140165e5e96537db87dbc32" exitCode=2 Feb 16 11:31:57 crc 
kubenswrapper[4949]: I0216 11:31:57.114227 4949 generic.go:334] "Generic (PLEG): container finished" podID="e8932af0-5e7c-4715-bdfa-5b84382387d3" containerID="404aa4f187586988a1d8116851358debcbe5512f1358f52befa7d86132c1771d" exitCode=0 Feb 16 11:31:57 crc kubenswrapper[4949]: I0216 11:31:57.114301 4949 generic.go:334] "Generic (PLEG): container finished" podID="e8932af0-5e7c-4715-bdfa-5b84382387d3" containerID="8243c747a7d69db63a1cf450201f51b1d98077e5b1913f35ef3963a977dce559" exitCode=0 Feb 16 11:31:57 crc kubenswrapper[4949]: I0216 11:31:57.114426 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e8932af0-5e7c-4715-bdfa-5b84382387d3","Type":"ContainerDied","Data":"e37ba63457e430f59c62f0dd857c91021e6ab55bd140165e5e96537db87dbc32"} Feb 16 11:31:57 crc kubenswrapper[4949]: I0216 11:31:57.114535 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e8932af0-5e7c-4715-bdfa-5b84382387d3","Type":"ContainerDied","Data":"404aa4f187586988a1d8116851358debcbe5512f1358f52befa7d86132c1771d"} Feb 16 11:31:57 crc kubenswrapper[4949]: I0216 11:31:57.114605 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e8932af0-5e7c-4715-bdfa-5b84382387d3","Type":"ContainerDied","Data":"8243c747a7d69db63a1cf450201f51b1d98077e5b1913f35ef3963a977dce559"} Feb 16 11:31:57 crc kubenswrapper[4949]: I0216 11:31:57.249577 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="acc263a1-4f57-4dca-bcc5-5d5388539a5d" path="/var/lib/kubelet/pods/acc263a1-4f57-4dca-bcc5-5d5388539a5d/volumes" Feb 16 11:31:57 crc kubenswrapper[4949]: I0216 11:31:57.976770 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-8fd485cb6-bhx7b" Feb 16 11:31:57 crc kubenswrapper[4949]: I0216 11:31:57.990474 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-8fd485cb6-bhx7b" Feb 16 11:31:58 crc kubenswrapper[4949]: I0216 11:31:58.107287 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-57d8c9758b-hz957"] Feb 16 11:31:58 crc kubenswrapper[4949]: I0216 11:31:58.107576 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-57d8c9758b-hz957" podUID="74cdae97-ce6a-4653-a84d-6f46d9795fbb" containerName="placement-log" containerID="cri-o://61eea95e9c0cc8ca6196d4f1b0b70447afa54bc0a4be198777c23caacda7d1e4" gracePeriod=30 Feb 16 11:31:58 crc kubenswrapper[4949]: I0216 11:31:58.108409 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-57d8c9758b-hz957" podUID="74cdae97-ce6a-4653-a84d-6f46d9795fbb" containerName="placement-api" containerID="cri-o://30dd2127088aa2dec617aca580389796ed51e24350de342faadf463373e71c1e" gracePeriod=30 Feb 16 11:31:59 crc kubenswrapper[4949]: I0216 11:31:59.014670 4949 scope.go:117] "RemoveContainer" containerID="551239ae0fd1826a8ae33dd25021464daf5cdbd897f04cebea1430951aa0a85d" Feb 16 11:31:59 crc kubenswrapper[4949]: I0216 11:31:59.278739 4949 generic.go:334] "Generic (PLEG): container finished" podID="74cdae97-ce6a-4653-a84d-6f46d9795fbb" containerID="61eea95e9c0cc8ca6196d4f1b0b70447afa54bc0a4be198777c23caacda7d1e4" exitCode=143
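"Killing container with a grace period" marks the start of graceful termination: the runtime sends SIGTERM, waits up to gracePeriod (30s for ceilometer-0 and placement above), and only then falls back to SIGKILL. The exit codes PLEG reports afterwards encode how each container died: 0 is a clean shutdown on SIGTERM, small positive codes are the application's own error status (sg-core's exitCode=2 above), and codes above 128 follow the shell convention of 128 plus the fatal signal number, so placement-log's 143 just above is 128+15, killed by SIGTERM inside the grace window (137 would mean the SIGKILL fallback fired). A small sketch of that decoding; the helper name and thresholds are illustrative:

```go
package main

import (
	"fmt"
	"syscall"
)

// describeExit interprets a container exit code using the common
// 128+N convention for deaths by signal N.
func describeExit(code int) string {
	switch {
	case code == 0:
		return "clean exit"
	case code > 128 && code < 160:
		sig := syscall.Signal(code - 128)
		return fmt.Sprintf("killed by signal %d (%v)", code-128, sig)
	default:
		return "application error"
	}
}

func main() {
	for _, c := range []int{0, 1, 2, 143, 137} {
		fmt.Printf("exitCode=%d -> %s\n", c, describeExit(c))
	}
	// exitCode=143 -> killed by signal 15 (terminated)
	// exitCode=137 -> killed by signal 9 (killed)
}
```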
event={"ID":"74cdae97-ce6a-4653-a84d-6f46d9795fbb","Type":"ContainerDied","Data":"61eea95e9c0cc8ca6196d4f1b0b70447afa54bc0a4be198777c23caacda7d1e4"} Feb 16 11:32:00 crc kubenswrapper[4949]: I0216 11:32:00.310771 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5d4bdcc77b-tmvps" event={"ID":"c53f95c1-224b-4026-9e51-9a2677621865","Type":"ContainerStarted","Data":"c4f8591cdecdea55b6bdc5cb968290ea79810d0600fc0fc861fd84410a28f676"} Feb 16 11:32:00 crc kubenswrapper[4949]: I0216 11:32:00.315108 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-5d4bdcc77b-tmvps" Feb 16 11:32:00 crc kubenswrapper[4949]: I0216 11:32:00.348469 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-566c9b565f-fv7vz" event={"ID":"c2936fd0-97ae-4028-a1b7-27feb4919790","Type":"ContainerStarted","Data":"cb53d12121ed0d6b6e6fbf2f2b518310f5ce068bb8929eee34ceaa215ae4370f"} Feb 16 11:32:00 crc kubenswrapper[4949]: I0216 11:32:00.349267 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-566c9b565f-fv7vz" Feb 16 11:32:00 crc kubenswrapper[4949]: I0216 11:32:00.366562 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-5d4bdcc77b-tmvps" podStartSLOduration=4.511475424 podStartE2EDuration="9.366537687s" podCreationTimestamp="2026-02-16 11:31:51 +0000 UTC" firstStartedPulling="2026-02-16 11:31:54.24441116 +0000 UTC m=+1503.873745325" lastFinishedPulling="2026-02-16 11:31:59.099473423 +0000 UTC m=+1508.728807588" observedRunningTime="2026-02-16 11:32:00.336097568 +0000 UTC m=+1509.965431753" watchObservedRunningTime="2026-02-16 11:32:00.366537687 +0000 UTC m=+1509.995871852" Feb 16 11:32:00 crc kubenswrapper[4949]: I0216 11:32:00.370007 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-api-5db54b7646-q4qzf" podUID="d67acfe2-211f-45ab-b9ef-de039be62b4f" containerName="heat-api" containerID="cri-o://ac6fce3202c89a0242f51ebf394ed4a0688a2813079855338adc1948677cdfdb" gracePeriod=60 Feb 16 11:32:00 crc kubenswrapper[4949]: I0216 11:32:00.370336 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5db54b7646-q4qzf" event={"ID":"d67acfe2-211f-45ab-b9ef-de039be62b4f","Type":"ContainerStarted","Data":"ac6fce3202c89a0242f51ebf394ed4a0688a2813079855338adc1948677cdfdb"} Feb 16 11:32:00 crc kubenswrapper[4949]: I0216 11:32:00.370488 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-5db54b7646-q4qzf" Feb 16 11:32:00 crc kubenswrapper[4949]: I0216 11:32:00.411748 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-566c9b565f-fv7vz" podStartSLOduration=3.668882548 podStartE2EDuration="7.411727156s" podCreationTimestamp="2026-02-16 11:31:53 +0000 UTC" firstStartedPulling="2026-02-16 11:31:55.356657036 +0000 UTC m=+1504.985991201" lastFinishedPulling="2026-02-16 11:31:59.099501644 +0000 UTC m=+1508.728835809" observedRunningTime="2026-02-16 11:32:00.379142816 +0000 UTC m=+1510.008476981" watchObservedRunningTime="2026-02-16 11:32:00.411727156 +0000 UTC m=+1510.041061321" Feb 16 11:32:00 crc kubenswrapper[4949]: I0216 11:32:00.416793 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f6bc4c6c9-zglfk" event={"ID":"16d14031-a4a4-4965-a1fc-b385b05df235","Type":"ContainerStarted","Data":"b0afa1d77ba60e4f91a6fd07801be2b1c3c3d04fe8ba25a009d59fca5dcf3dd5"} Feb 16 11:32:00 crc 
kubenswrapper[4949]: I0216 11:32:00.418314 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-f6bc4c6c9-zglfk" Feb 16 11:32:00 crc kubenswrapper[4949]: I0216 11:32:00.427030 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-5db54b7646-q4qzf" podStartSLOduration=12.492917336 podStartE2EDuration="16.427002482s" podCreationTimestamp="2026-02-16 11:31:44 +0000 UTC" firstStartedPulling="2026-02-16 11:31:55.132734295 +0000 UTC m=+1504.762068460" lastFinishedPulling="2026-02-16 11:31:59.066819441 +0000 UTC m=+1508.696153606" observedRunningTime="2026-02-16 11:32:00.418818339 +0000 UTC m=+1510.048152504" watchObservedRunningTime="2026-02-16 11:32:00.427002482 +0000 UTC m=+1510.056336647" Feb 16 11:32:00 crc kubenswrapper[4949]: I0216 11:32:00.427428 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5cdc74fffc-n2hcr" event={"ID":"ecc029c7-0e9a-4211-8632-56c7b5e1b179","Type":"ContainerStarted","Data":"ecd0536bc73c052b5ec745e2fe5384064c84311126a705162e93a90b09dec304"} Feb 16 11:32:00 crc kubenswrapper[4949]: I0216 11:32:00.428073 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-5cdc74fffc-n2hcr" Feb 16 11:32:00 crc kubenswrapper[4949]: I0216 11:32:00.459567 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-5489d8946b-q8mgt" event={"ID":"e4998389-d4f4-44f0-b048-988b96e27acc","Type":"ContainerStarted","Data":"552ad58cefc995110badccfa20da17f2f369157fbd4702a075f6df00eb1b9382"} Feb 16 11:32:00 crc kubenswrapper[4949]: I0216 11:32:00.460507 4949 scope.go:117] "RemoveContainer" containerID="552ad58cefc995110badccfa20da17f2f369157fbd4702a075f6df00eb1b9382" Feb 16 11:32:00 crc kubenswrapper[4949]: I0216 11:32:00.472702 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7dc46f9d6-d9jsn" event={"ID":"cbb85a9a-237f-474a-b7d4-95f06bc44724","Type":"ContainerStarted","Data":"2297b313acd9feeb91692dfb7a903448519c493e0493757a9b33afae37d089b4"} Feb 16 11:32:00 crc kubenswrapper[4949]: I0216 11:32:00.472946 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-cfnapi-7dc46f9d6-d9jsn" podUID="cbb85a9a-237f-474a-b7d4-95f06bc44724" containerName="heat-cfnapi" containerID="cri-o://2297b313acd9feeb91692dfb7a903448519c493e0493757a9b33afae37d089b4" gracePeriod=60 Feb 16 11:32:00 crc kubenswrapper[4949]: I0216 11:32:00.473404 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-7dc46f9d6-d9jsn" Feb 16 11:32:00 crc kubenswrapper[4949]: I0216 11:32:00.474355 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-f6bc4c6c9-zglfk" podStartSLOduration=16.474341244 podStartE2EDuration="16.474341244s" podCreationTimestamp="2026-02-16 11:31:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:32:00.45180337 +0000 UTC m=+1510.081137535" watchObservedRunningTime="2026-02-16 11:32:00.474341244 +0000 UTC m=+1510.103675409"
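The pod_startup_latency_tracker entries carry two durations. podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp; podStartSLOduration additionally subtracts the image-pull window (lastFinishedPulling minus firstStartedPulling), since the startup SLO excludes pull time. The heat-api-5d4bdcc77b-tmvps entry above checks out: 9.366537687s end to end, minus the 4.855062263s pull (11:31:54.244411160 to 11:31:59.099473423), is exactly the reported 4.511475424s. Pods that pulled nothing show the zero time for both pull stamps, so SLO and E2E coincide (the dnsmasq and heat-engine entries). A sketch of the arithmetic with the timestamps pasted from the log; the relationship between the fields is inferred from these numbers, not quoted from kubelet source:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	parse := func(s string) time.Time {
		t, err := time.Parse(layout, s)
		if err != nil {
			panic(err)
		}
		return t
	}
	created := parse("2026-02-16 11:31:51 +0000 UTC")
	running := parse("2026-02-16 11:32:00.366537687 +0000 UTC") // watchObservedRunningTime
	pullStart := parse("2026-02-16 11:31:54.24441116 +0000 UTC")
	pullEnd := parse("2026-02-16 11:31:59.099473423 +0000 UTC")

	e2e := running.Sub(created)
	slo := e2e - pullEnd.Sub(pullStart)
	fmt.Println("podStartE2EDuration:", e2e) // 9.366537687s
	fmt.Println("podStartSLOduration:", slo) // 4.511475424s
}
```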
m=+1505.018689894" lastFinishedPulling="2026-02-16 11:31:59.099492104 +0000 UTC m=+1508.728826279" observedRunningTime="2026-02-16 11:32:00.495414225 +0000 UTC m=+1510.124748390" watchObservedRunningTime="2026-02-16 11:32:00.544838146 +0000 UTC m=+1510.174172311" Feb 16 11:32:00 crc kubenswrapper[4949]: I0216 11:32:00.568851 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-7dc46f9d6-d9jsn" podStartSLOduration=12.274125631 podStartE2EDuration="16.56882806s" podCreationTimestamp="2026-02-16 11:31:44 +0000 UTC" firstStartedPulling="2026-02-16 11:31:54.772152453 +0000 UTC m=+1504.401486618" lastFinishedPulling="2026-02-16 11:31:59.066854882 +0000 UTC m=+1508.696189047" observedRunningTime="2026-02-16 11:32:00.519439831 +0000 UTC m=+1510.148773996" watchObservedRunningTime="2026-02-16 11:32:00.56882806 +0000 UTC m=+1510.198162225" Feb 16 11:32:01 crc kubenswrapper[4949]: I0216 11:32:01.489019 4949 generic.go:334] "Generic (PLEG): container finished" podID="e4998389-d4f4-44f0-b048-988b96e27acc" containerID="552ad58cefc995110badccfa20da17f2f369157fbd4702a075f6df00eb1b9382" exitCode=1 Feb 16 11:32:01 crc kubenswrapper[4949]: I0216 11:32:01.489137 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-5489d8946b-q8mgt" event={"ID":"e4998389-d4f4-44f0-b048-988b96e27acc","Type":"ContainerDied","Data":"552ad58cefc995110badccfa20da17f2f369157fbd4702a075f6df00eb1b9382"} Feb 16 11:32:01 crc kubenswrapper[4949]: I0216 11:32:01.489440 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-5489d8946b-q8mgt" Feb 16 11:32:01 crc kubenswrapper[4949]: I0216 11:32:01.489456 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-5489d8946b-q8mgt" event={"ID":"e4998389-d4f4-44f0-b048-988b96e27acc","Type":"ContainerStarted","Data":"d3fb4ce85ec36b9715e39d392f106a95a1d269d6347c50b0e741b83a54fa5188"} Feb 16 11:32:01 crc kubenswrapper[4949]: I0216 11:32:01.492370 4949 generic.go:334] "Generic (PLEG): container finished" podID="c53f95c1-224b-4026-9e51-9a2677621865" containerID="c4f8591cdecdea55b6bdc5cb968290ea79810d0600fc0fc861fd84410a28f676" exitCode=1 Feb 16 11:32:01 crc kubenswrapper[4949]: I0216 11:32:01.492460 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5d4bdcc77b-tmvps" event={"ID":"c53f95c1-224b-4026-9e51-9a2677621865","Type":"ContainerDied","Data":"c4f8591cdecdea55b6bdc5cb968290ea79810d0600fc0fc861fd84410a28f676"} Feb 16 11:32:01 crc kubenswrapper[4949]: I0216 11:32:01.493419 4949 scope.go:117] "RemoveContainer" containerID="c4f8591cdecdea55b6bdc5cb968290ea79810d0600fc0fc861fd84410a28f676" Feb 16 11:32:01 crc kubenswrapper[4949]: I0216 11:32:01.522952 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-5489d8946b-q8mgt" podStartSLOduration=6.213202804 podStartE2EDuration="10.522925252s" podCreationTimestamp="2026-02-16 11:31:51 +0000 UTC" firstStartedPulling="2026-02-16 11:31:54.789677513 +0000 UTC m=+1504.419011678" lastFinishedPulling="2026-02-16 11:31:59.099399961 +0000 UTC m=+1508.728734126" observedRunningTime="2026-02-16 11:32:01.514677967 +0000 UTC m=+1511.144012152" watchObservedRunningTime="2026-02-16 11:32:01.522925252 +0000 UTC m=+1511.152259427" Feb 16 11:32:01 crc kubenswrapper[4949]: I0216 11:32:01.748300 4949 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-api-5d4bdcc77b-tmvps" Feb 16 11:32:02 crc kubenswrapper[4949]: I0216 
Feb 16 11:32:02 crc kubenswrapper[4949]: I0216 11:32:02.568125 4949 generic.go:334] "Generic (PLEG): container finished" podID="d67acfe2-211f-45ab-b9ef-de039be62b4f" containerID="ac6fce3202c89a0242f51ebf394ed4a0688a2813079855338adc1948677cdfdb" exitCode=0 Feb 16 11:32:02 crc kubenswrapper[4949]: I0216 11:32:02.568589 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5db54b7646-q4qzf" event={"ID":"d67acfe2-211f-45ab-b9ef-de039be62b4f","Type":"ContainerDied","Data":"ac6fce3202c89a0242f51ebf394ed4a0688a2813079855338adc1948677cdfdb"} Feb 16 11:32:02 crc kubenswrapper[4949]: I0216 11:32:02.596107 4949 generic.go:334] "Generic (PLEG): container finished" podID="e4998389-d4f4-44f0-b048-988b96e27acc" containerID="d3fb4ce85ec36b9715e39d392f106a95a1d269d6347c50b0e741b83a54fa5188" exitCode=1 Feb 16 11:32:02 crc kubenswrapper[4949]: I0216 11:32:02.596210 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-5489d8946b-q8mgt" event={"ID":"e4998389-d4f4-44f0-b048-988b96e27acc","Type":"ContainerDied","Data":"d3fb4ce85ec36b9715e39d392f106a95a1d269d6347c50b0e741b83a54fa5188"} Feb 16 11:32:02 crc kubenswrapper[4949]: I0216 11:32:02.596258 4949 scope.go:117] "RemoveContainer" containerID="552ad58cefc995110badccfa20da17f2f369157fbd4702a075f6df00eb1b9382" Feb 16 11:32:02 crc kubenswrapper[4949]: I0216 11:32:02.597484 4949 scope.go:117] "RemoveContainer" containerID="d3fb4ce85ec36b9715e39d392f106a95a1d269d6347c50b0e741b83a54fa5188" Feb 16 11:32:02 crc kubenswrapper[4949]: E0216 11:32:02.597884 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-5489d8946b-q8mgt_openstack(e4998389-d4f4-44f0-b048-988b96e27acc)\"" pod="openstack/heat-cfnapi-5489d8946b-q8mgt" podUID="e4998389-d4f4-44f0-b048-988b96e27acc" Feb 16 11:32:02 crc kubenswrapper[4949]: I0216 11:32:02.661639 4949 generic.go:334] "Generic (PLEG): container finished" podID="cbb85a9a-237f-474a-b7d4-95f06bc44724" containerID="2297b313acd9feeb91692dfb7a903448519c493e0493757a9b33afae37d089b4" exitCode=0 Feb 16 11:32:02 crc kubenswrapper[4949]: I0216 11:32:02.661797 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7dc46f9d6-d9jsn" event={"ID":"cbb85a9a-237f-474a-b7d4-95f06bc44724","Type":"ContainerDied","Data":"2297b313acd9feeb91692dfb7a903448519c493e0493757a9b33afae37d089b4"} Feb 16 11:32:02 crc kubenswrapper[4949]: I0216 11:32:02.680528 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-sf64k"] Feb 16 11:32:02 crc kubenswrapper[4949]: E0216 11:32:02.681747 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="acc263a1-4f57-4dca-bcc5-5d5388539a5d" containerName="registry-server" Feb 16 11:32:02 crc kubenswrapper[4949]: I0216 11:32:02.681774 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="acc263a1-4f57-4dca-bcc5-5d5388539a5d" containerName="registry-server" Feb 16 11:32:02 crc kubenswrapper[4949]: E0216 11:32:02.681793 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f44c7bf-e0a8-4250-b513-147e6e88718d" containerName="neutron-httpd" Feb 16 11:32:02 crc kubenswrapper[4949]: I0216 11:32:02.681799 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f44c7bf-e0a8-4250-b513-147e6e88718d" containerName="neutron-httpd" Feb 16 11:32:02 crc kubenswrapper[4949]: E0216 11:32:02.681824 4949 cpu_manager.go:410] "RemoveStaleState: removing container"
podUID="acc263a1-4f57-4dca-bcc5-5d5388539a5d" containerName="extract-content" Feb 16 11:32:02 crc kubenswrapper[4949]: I0216 11:32:02.681831 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="acc263a1-4f57-4dca-bcc5-5d5388539a5d" containerName="extract-content" Feb 16 11:32:02 crc kubenswrapper[4949]: E0216 11:32:02.681856 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="acc263a1-4f57-4dca-bcc5-5d5388539a5d" containerName="extract-utilities" Feb 16 11:32:02 crc kubenswrapper[4949]: I0216 11:32:02.681863 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="acc263a1-4f57-4dca-bcc5-5d5388539a5d" containerName="extract-utilities" Feb 16 11:32:02 crc kubenswrapper[4949]: E0216 11:32:02.681897 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f44c7bf-e0a8-4250-b513-147e6e88718d" containerName="neutron-api" Feb 16 11:32:02 crc kubenswrapper[4949]: I0216 11:32:02.681904 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f44c7bf-e0a8-4250-b513-147e6e88718d" containerName="neutron-api" Feb 16 11:32:02 crc kubenswrapper[4949]: I0216 11:32:02.682450 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f44c7bf-e0a8-4250-b513-147e6e88718d" containerName="neutron-httpd" Feb 16 11:32:02 crc kubenswrapper[4949]: I0216 11:32:02.682482 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f44c7bf-e0a8-4250-b513-147e6e88718d" containerName="neutron-api" Feb 16 11:32:02 crc kubenswrapper[4949]: I0216 11:32:02.682498 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="acc263a1-4f57-4dca-bcc5-5d5388539a5d" containerName="registry-server" Feb 16 11:32:02 crc kubenswrapper[4949]: I0216 11:32:02.686188 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sf64k" Feb 16 11:32:02 crc kubenswrapper[4949]: I0216 11:32:02.703595 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-sf64k"] Feb 16 11:32:02 crc kubenswrapper[4949]: I0216 11:32:02.718684 4949 scope.go:117] "RemoveContainer" containerID="32b045667e36f74a7779b74a962feda63727f099cad34d3cb0b14940de390174" Feb 16 11:32:02 crc kubenswrapper[4949]: I0216 11:32:02.718936 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5d4bdcc77b-tmvps" event={"ID":"c53f95c1-224b-4026-9e51-9a2677621865","Type":"ContainerStarted","Data":"32b045667e36f74a7779b74a962feda63727f099cad34d3cb0b14940de390174"} Feb 16 11:32:02 crc kubenswrapper[4949]: E0216 11:32:02.725343 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-5d4bdcc77b-tmvps_openstack(c53f95c1-224b-4026-9e51-9a2677621865)\"" pod="openstack/heat-api-5d4bdcc77b-tmvps" podUID="c53f95c1-224b-4026-9e51-9a2677621865" Feb 16 11:32:02 crc kubenswrapper[4949]: I0216 11:32:02.738279 4949 generic.go:334] "Generic (PLEG): container finished" podID="74cdae97-ce6a-4653-a84d-6f46d9795fbb" containerID="30dd2127088aa2dec617aca580389796ed51e24350de342faadf463373e71c1e" exitCode=0 Feb 16 11:32:02 crc kubenswrapper[4949]: I0216 11:32:02.750882 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-57d8c9758b-hz957" event={"ID":"74cdae97-ce6a-4653-a84d-6f46d9795fbb","Type":"ContainerDied","Data":"30dd2127088aa2dec617aca580389796ed51e24350de342faadf463373e71c1e"} Feb 16 11:32:02 crc kubenswrapper[4949]: I0216 
11:32:02.758026 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mlvdx\" (UniqueName: \"kubernetes.io/projected/9d6c2dd5-e12e-427c-a512-496546dcc309-kube-api-access-mlvdx\") pod \"certified-operators-sf64k\" (UID: \"9d6c2dd5-e12e-427c-a512-496546dcc309\") " pod="openshift-marketplace/certified-operators-sf64k" Feb 16 11:32:02 crc kubenswrapper[4949]: I0216 11:32:02.795488 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9d6c2dd5-e12e-427c-a512-496546dcc309-catalog-content\") pod \"certified-operators-sf64k\" (UID: \"9d6c2dd5-e12e-427c-a512-496546dcc309\") " pod="openshift-marketplace/certified-operators-sf64k" Feb 16 11:32:02 crc kubenswrapper[4949]: I0216 11:32:02.795717 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9d6c2dd5-e12e-427c-a512-496546dcc309-utilities\") pod \"certified-operators-sf64k\" (UID: \"9d6c2dd5-e12e-427c-a512-496546dcc309\") " pod="openshift-marketplace/certified-operators-sf64k" Feb 16 11:32:02 crc kubenswrapper[4949]: I0216 11:32:02.902096 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9d6c2dd5-e12e-427c-a512-496546dcc309-utilities\") pod \"certified-operators-sf64k\" (UID: \"9d6c2dd5-e12e-427c-a512-496546dcc309\") " pod="openshift-marketplace/certified-operators-sf64k" Feb 16 11:32:02 crc kubenswrapper[4949]: I0216 11:32:02.903537 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9d6c2dd5-e12e-427c-a512-496546dcc309-utilities\") pod \"certified-operators-sf64k\" (UID: \"9d6c2dd5-e12e-427c-a512-496546dcc309\") " pod="openshift-marketplace/certified-operators-sf64k" Feb 16 11:32:02 crc kubenswrapper[4949]: I0216 11:32:02.909238 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mlvdx\" (UniqueName: \"kubernetes.io/projected/9d6c2dd5-e12e-427c-a512-496546dcc309-kube-api-access-mlvdx\") pod \"certified-operators-sf64k\" (UID: \"9d6c2dd5-e12e-427c-a512-496546dcc309\") " pod="openshift-marketplace/certified-operators-sf64k" Feb 16 11:32:02 crc kubenswrapper[4949]: I0216 11:32:02.909557 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9d6c2dd5-e12e-427c-a512-496546dcc309-catalog-content\") pod \"certified-operators-sf64k\" (UID: \"9d6c2dd5-e12e-427c-a512-496546dcc309\") " pod="openshift-marketplace/certified-operators-sf64k" Feb 16 11:32:02 crc kubenswrapper[4949]: I0216 11:32:02.913128 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9d6c2dd5-e12e-427c-a512-496546dcc309-catalog-content\") pod \"certified-operators-sf64k\" (UID: \"9d6c2dd5-e12e-427c-a512-496546dcc309\") " pod="openshift-marketplace/certified-operators-sf64k"
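Pod creation mirrors the unmount sequence seen earlier in this section. For volumes that need no controller attach (the projected service-account token and the two emptyDirs here), reconciler_common.go first logs VerifyControllerAttachedVolume, then MountVolume started, and operation_generator.go confirms MountVolume.SetUp succeeded per volume; only once all of a pod's volumes are set up can the sandbox be created. A sketch that tallies which volumes a pod is still waiting on; the regexes and bookkeeping are illustrative, not kubelet code:

```go
package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

var (
	// "MountVolume started" names a volume the pod still needs;
	// "MountVolume.SetUp succeeded" clears it. The inner volume name is
	// backslash-quoted in the log; the trailing pod="..." field is not.
	mountStarted = regexp.MustCompile(`MountVolume started for volume \\"([^\\]+)\\".*pod="([^"]+)"`)
	setupDone    = regexp.MustCompile(`MountVolume\.SetUp succeeded for volume \\"([^\\]+)\\".*pod="([^"]+)"`)
)

func main() {
	pending := map[string]map[string]bool{} // pod -> volumes not yet set up
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 1<<20), 1<<20)
	for sc.Scan() {
		line := sc.Text()
		if m := mountStarted.FindStringSubmatch(line); m != nil {
			if pending[m[2]] == nil {
				pending[m[2]] = map[string]bool{}
			}
			pending[m[2]][m[1]] = true
		} else if m := setupDone.FindStringSubmatch(line); m != nil {
			delete(pending[m[2]], m[1])
		}
	}
	for pod, vols := range pending {
		fmt.Printf("%s: %d volume(s) still pending\n", pod, len(vols))
	}
}
```

Run over this section, certified-operators-sf64k's count drops to zero once the kube-api-access-mlvdx SetUp below completes.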
pod="openshift-marketplace/certified-operators-sf64k" Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.032986 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-5db54b7646-q4qzf" Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.082924 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sf64k" Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.115956 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-74vnw\" (UniqueName: \"kubernetes.io/projected/d67acfe2-211f-45ab-b9ef-de039be62b4f-kube-api-access-74vnw\") pod \"d67acfe2-211f-45ab-b9ef-de039be62b4f\" (UID: \"d67acfe2-211f-45ab-b9ef-de039be62b4f\") " Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.116033 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d67acfe2-211f-45ab-b9ef-de039be62b4f-config-data-custom\") pod \"d67acfe2-211f-45ab-b9ef-de039be62b4f\" (UID: \"d67acfe2-211f-45ab-b9ef-de039be62b4f\") " Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.116209 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d67acfe2-211f-45ab-b9ef-de039be62b4f-config-data\") pod \"d67acfe2-211f-45ab-b9ef-de039be62b4f\" (UID: \"d67acfe2-211f-45ab-b9ef-de039be62b4f\") " Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.116432 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d67acfe2-211f-45ab-b9ef-de039be62b4f-combined-ca-bundle\") pod \"d67acfe2-211f-45ab-b9ef-de039be62b4f\" (UID: \"d67acfe2-211f-45ab-b9ef-de039be62b4f\") " Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.143715 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d67acfe2-211f-45ab-b9ef-de039be62b4f-kube-api-access-74vnw" (OuterVolumeSpecName: "kube-api-access-74vnw") pod "d67acfe2-211f-45ab-b9ef-de039be62b4f" (UID: "d67acfe2-211f-45ab-b9ef-de039be62b4f"). InnerVolumeSpecName "kube-api-access-74vnw". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.157202 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d67acfe2-211f-45ab-b9ef-de039be62b4f-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "d67acfe2-211f-45ab-b9ef-de039be62b4f" (UID: "d67acfe2-211f-45ab-b9ef-de039be62b4f"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.170247 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-74vnw\" (UniqueName: \"kubernetes.io/projected/d67acfe2-211f-45ab-b9ef-de039be62b4f-kube-api-access-74vnw\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.170361 4949 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d67acfe2-211f-45ab-b9ef-de039be62b4f-config-data-custom\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.284604 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d67acfe2-211f-45ab-b9ef-de039be62b4f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d67acfe2-211f-45ab-b9ef-de039be62b4f" (UID: "d67acfe2-211f-45ab-b9ef-de039be62b4f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.350319 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-7dc46f9d6-d9jsn" Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.376967 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cbb85a9a-237f-474a-b7d4-95f06bc44724-config-data-custom\") pod \"cbb85a9a-237f-474a-b7d4-95f06bc44724\" (UID: \"cbb85a9a-237f-474a-b7d4-95f06bc44724\") " Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.377106 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cbb85a9a-237f-474a-b7d4-95f06bc44724-combined-ca-bundle\") pod \"cbb85a9a-237f-474a-b7d4-95f06bc44724\" (UID: \"cbb85a9a-237f-474a-b7d4-95f06bc44724\") " Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.377282 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dtqt7\" (UniqueName: \"kubernetes.io/projected/cbb85a9a-237f-474a-b7d4-95f06bc44724-kube-api-access-dtqt7\") pod \"cbb85a9a-237f-474a-b7d4-95f06bc44724\" (UID: \"cbb85a9a-237f-474a-b7d4-95f06bc44724\") " Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.377407 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cbb85a9a-237f-474a-b7d4-95f06bc44724-config-data\") pod \"cbb85a9a-237f-474a-b7d4-95f06bc44724\" (UID: \"cbb85a9a-237f-474a-b7d4-95f06bc44724\") " Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.379036 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d67acfe2-211f-45ab-b9ef-de039be62b4f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.392448 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cbb85a9a-237f-474a-b7d4-95f06bc44724-kube-api-access-dtqt7" (OuterVolumeSpecName: "kube-api-access-dtqt7") pod "cbb85a9a-237f-474a-b7d4-95f06bc44724" (UID: "cbb85a9a-237f-474a-b7d4-95f06bc44724"). InnerVolumeSpecName "kube-api-access-dtqt7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.392858 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cbb85a9a-237f-474a-b7d4-95f06bc44724-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "cbb85a9a-237f-474a-b7d4-95f06bc44724" (UID: "cbb85a9a-237f-474a-b7d4-95f06bc44724"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.413250 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d67acfe2-211f-45ab-b9ef-de039be62b4f-config-data" (OuterVolumeSpecName: "config-data") pod "d67acfe2-211f-45ab-b9ef-de039be62b4f" (UID: "d67acfe2-211f-45ab-b9ef-de039be62b4f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.433258 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-57d8c9758b-hz957" Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.444303 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cbb85a9a-237f-474a-b7d4-95f06bc44724-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cbb85a9a-237f-474a-b7d4-95f06bc44724" (UID: "cbb85a9a-237f-474a-b7d4-95f06bc44724"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.482522 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74cdae97-ce6a-4653-a84d-6f46d9795fbb-combined-ca-bundle\") pod \"74cdae97-ce6a-4653-a84d-6f46d9795fbb\" (UID: \"74cdae97-ce6a-4653-a84d-6f46d9795fbb\") " Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.492563 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/74cdae97-ce6a-4653-a84d-6f46d9795fbb-logs\") pod \"74cdae97-ce6a-4653-a84d-6f46d9795fbb\" (UID: \"74cdae97-ce6a-4653-a84d-6f46d9795fbb\") " Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.492891 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hkxb7\" (UniqueName: \"kubernetes.io/projected/74cdae97-ce6a-4653-a84d-6f46d9795fbb-kube-api-access-hkxb7\") pod \"74cdae97-ce6a-4653-a84d-6f46d9795fbb\" (UID: \"74cdae97-ce6a-4653-a84d-6f46d9795fbb\") " Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.493033 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74cdae97-ce6a-4653-a84d-6f46d9795fbb-config-data\") pod \"74cdae97-ce6a-4653-a84d-6f46d9795fbb\" (UID: \"74cdae97-ce6a-4653-a84d-6f46d9795fbb\") " Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.493302 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/74cdae97-ce6a-4653-a84d-6f46d9795fbb-public-tls-certs\") pod \"74cdae97-ce6a-4653-a84d-6f46d9795fbb\" (UID: \"74cdae97-ce6a-4653-a84d-6f46d9795fbb\") " Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.493483 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/74cdae97-ce6a-4653-a84d-6f46d9795fbb-internal-tls-certs\") pod \"74cdae97-ce6a-4653-a84d-6f46d9795fbb\" (UID: \"74cdae97-ce6a-4653-a84d-6f46d9795fbb\") " Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.493688 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/74cdae97-ce6a-4653-a84d-6f46d9795fbb-scripts\") pod \"74cdae97-ce6a-4653-a84d-6f46d9795fbb\" (UID: \"74cdae97-ce6a-4653-a84d-6f46d9795fbb\") " Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.493507 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/74cdae97-ce6a-4653-a84d-6f46d9795fbb-logs" (OuterVolumeSpecName: "logs") pod "74cdae97-ce6a-4653-a84d-6f46d9795fbb" (UID: "74cdae97-ce6a-4653-a84d-6f46d9795fbb"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.494522 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cbb85a9a-237f-474a-b7d4-95f06bc44724-config-data" (OuterVolumeSpecName: "config-data") pod "cbb85a9a-237f-474a-b7d4-95f06bc44724" (UID: "cbb85a9a-237f-474a-b7d4-95f06bc44724"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.501264 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dtqt7\" (UniqueName: \"kubernetes.io/projected/cbb85a9a-237f-474a-b7d4-95f06bc44724-kube-api-access-dtqt7\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.501384 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d67acfe2-211f-45ab-b9ef-de039be62b4f-config-data\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.501465 4949 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cbb85a9a-237f-474a-b7d4-95f06bc44724-config-data-custom\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.501541 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cbb85a9a-237f-474a-b7d4-95f06bc44724-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.526990 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/74cdae97-ce6a-4653-a84d-6f46d9795fbb-kube-api-access-hkxb7" (OuterVolumeSpecName: "kube-api-access-hkxb7") pod "74cdae97-ce6a-4653-a84d-6f46d9795fbb" (UID: "74cdae97-ce6a-4653-a84d-6f46d9795fbb"). InnerVolumeSpecName "kube-api-access-hkxb7". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.527070 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74cdae97-ce6a-4653-a84d-6f46d9795fbb-scripts" (OuterVolumeSpecName: "scripts") pod "74cdae97-ce6a-4653-a84d-6f46d9795fbb" (UID: "74cdae97-ce6a-4653-a84d-6f46d9795fbb"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.611341 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cbb85a9a-237f-474a-b7d4-95f06bc44724-config-data\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.611371 4949 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/74cdae97-ce6a-4653-a84d-6f46d9795fbb-scripts\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.611385 4949 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/74cdae97-ce6a-4653-a84d-6f46d9795fbb-logs\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.611394 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hkxb7\" (UniqueName: \"kubernetes.io/projected/74cdae97-ce6a-4653-a84d-6f46d9795fbb-kube-api-access-hkxb7\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.615478 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74cdae97-ce6a-4653-a84d-6f46d9795fbb-config-data" (OuterVolumeSpecName: "config-data") pod "74cdae97-ce6a-4653-a84d-6f46d9795fbb" (UID: "74cdae97-ce6a-4653-a84d-6f46d9795fbb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.653003 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74cdae97-ce6a-4653-a84d-6f46d9795fbb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "74cdae97-ce6a-4653-a84d-6f46d9795fbb" (UID: "74cdae97-ce6a-4653-a84d-6f46d9795fbb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.719586 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74cdae97-ce6a-4653-a84d-6f46d9795fbb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.719640 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74cdae97-ce6a-4653-a84d-6f46d9795fbb-config-data\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.772946 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74cdae97-ce6a-4653-a84d-6f46d9795fbb-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "74cdae97-ce6a-4653-a84d-6f46d9795fbb" (UID: "74cdae97-ce6a-4653-a84d-6f46d9795fbb"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.788291 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74cdae97-ce6a-4653-a84d-6f46d9795fbb-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "74cdae97-ce6a-4653-a84d-6f46d9795fbb" (UID: "74cdae97-ce6a-4653-a84d-6f46d9795fbb"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.822628 4949 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/74cdae97-ce6a-4653-a84d-6f46d9795fbb-public-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.822675 4949 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/74cdae97-ce6a-4653-a84d-6f46d9795fbb-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.842639 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5db54b7646-q4qzf" event={"ID":"d67acfe2-211f-45ab-b9ef-de039be62b4f","Type":"ContainerDied","Data":"5194196f3e7a23a197b7f35169f5e54f0e0929cf9591ae7f50818257b3b0fee0"} Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.842711 4949 scope.go:117] "RemoveContainer" containerID="ac6fce3202c89a0242f51ebf394ed4a0688a2813079855338adc1948677cdfdb" Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.842870 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-5db54b7646-q4qzf" Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.857294 4949 scope.go:117] "RemoveContainer" containerID="d3fb4ce85ec36b9715e39d392f106a95a1d269d6347c50b0e741b83a54fa5188" Feb 16 11:32:03 crc kubenswrapper[4949]: E0216 11:32:03.857681 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-5489d8946b-q8mgt_openstack(e4998389-d4f4-44f0-b048-988b96e27acc)\"" pod="openstack/heat-cfnapi-5489d8946b-q8mgt" podUID="e4998389-d4f4-44f0-b048-988b96e27acc" Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.870101 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7dc46f9d6-d9jsn" event={"ID":"cbb85a9a-237f-474a-b7d4-95f06bc44724","Type":"ContainerDied","Data":"e4252a65cbc88b3bcc579e7b17a624a80c58e68af3e1e8861119fc9f72dd7871"} Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.870292 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-7dc46f9d6-d9jsn" Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.887159 4949 generic.go:334] "Generic (PLEG): container finished" podID="c53f95c1-224b-4026-9e51-9a2677621865" containerID="32b045667e36f74a7779b74a962feda63727f099cad34d3cb0b14940de390174" exitCode=1 Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.887268 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5d4bdcc77b-tmvps" event={"ID":"c53f95c1-224b-4026-9e51-9a2677621865","Type":"ContainerDied","Data":"32b045667e36f74a7779b74a962feda63727f099cad34d3cb0b14940de390174"} Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.888255 4949 scope.go:117] "RemoveContainer" containerID="32b045667e36f74a7779b74a962feda63727f099cad34d3cb0b14940de390174" Feb 16 11:32:03 crc kubenswrapper[4949]: E0216 11:32:03.888535 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-5d4bdcc77b-tmvps_openstack(c53f95c1-224b-4026-9e51-9a2677621865)\"" pod="openstack/heat-api-5d4bdcc77b-tmvps" podUID="c53f95c1-224b-4026-9e51-9a2677621865" Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.892055 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-57d8c9758b-hz957" event={"ID":"74cdae97-ce6a-4653-a84d-6f46d9795fbb","Type":"ContainerDied","Data":"8924c6378e26a9eaf8688f6e6979bb9fc5982c30040f0bc21de580aa2bf9f5c1"} Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.892336 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-57d8c9758b-hz957" Feb 16 11:32:03 crc kubenswrapper[4949]: I0216 11:32:03.922319 4949 scope.go:117] "RemoveContainer" containerID="2297b313acd9feeb91692dfb7a903448519c493e0493757a9b33afae37d089b4" Feb 16 11:32:04 crc kubenswrapper[4949]: I0216 11:32:04.012228 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-sf64k"] Feb 16 11:32:04 crc kubenswrapper[4949]: I0216 11:32:04.016153 4949 scope.go:117] "RemoveContainer" containerID="c4f8591cdecdea55b6bdc5cb968290ea79810d0600fc0fc861fd84410a28f676" Feb 16 11:32:04 crc kubenswrapper[4949]: I0216 11:32:04.073122 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-5db54b7646-q4qzf"] Feb 16 11:32:04 crc kubenswrapper[4949]: I0216 11:32:04.100009 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-5db54b7646-q4qzf"] Feb 16 11:32:04 crc kubenswrapper[4949]: I0216 11:32:04.124544 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-57d8c9758b-hz957"] Feb 16 11:32:04 crc kubenswrapper[4949]: I0216 11:32:04.147260 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-57d8c9758b-hz957"] Feb 16 11:32:04 crc kubenswrapper[4949]: I0216 11:32:04.165642 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-7dc46f9d6-d9jsn"] Feb 16 11:32:04 crc kubenswrapper[4949]: I0216 11:32:04.174202 4949 scope.go:117] "RemoveContainer" containerID="30dd2127088aa2dec617aca580389796ed51e24350de342faadf463373e71c1e" Feb 16 11:32:04 crc kubenswrapper[4949]: I0216 11:32:04.183053 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-7dc46f9d6-d9jsn"] Feb 16 11:32:04 crc kubenswrapper[4949]: I0216 11:32:04.234222 4949 scope.go:117] "RemoveContainer" 
containerID="61eea95e9c0cc8ca6196d4f1b0b70447afa54bc0a4be198777c23caacda7d1e4" Feb 16 11:32:04 crc kubenswrapper[4949]: I0216 11:32:04.552618 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-c9d48d96d-4b894" Feb 16 11:32:04 crc kubenswrapper[4949]: I0216 11:32:04.663485 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-f6bc4c6c9-zglfk" Feb 16 11:32:04 crc kubenswrapper[4949]: I0216 11:32:04.827534 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-f2c9s"] Feb 16 11:32:04 crc kubenswrapper[4949]: I0216 11:32:04.828043 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5784cf869f-f2c9s" podUID="2345d1ab-b1c1-4959-a5ac-5da97eda36e3" containerName="dnsmasq-dns" containerID="cri-o://1f0446c6bed40a5d7d999c32d0f5d94b7d2ae5ff811cb354123c471f37a736cc" gracePeriod=10 Feb 16 11:32:04 crc kubenswrapper[4949]: I0216 11:32:04.941936 4949 scope.go:117] "RemoveContainer" containerID="32b045667e36f74a7779b74a962feda63727f099cad34d3cb0b14940de390174" Feb 16 11:32:04 crc kubenswrapper[4949]: E0216 11:32:04.942331 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-5d4bdcc77b-tmvps_openstack(c53f95c1-224b-4026-9e51-9a2677621865)\"" pod="openstack/heat-api-5d4bdcc77b-tmvps" podUID="c53f95c1-224b-4026-9e51-9a2677621865" Feb 16 11:32:04 crc kubenswrapper[4949]: I0216 11:32:04.956560 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 16 11:32:04 crc kubenswrapper[4949]: I0216 11:32:04.957207 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="a5b65741-25f3-43db-a544-85997388cfea" containerName="glance-log" containerID="cri-o://6e14183673e66ee7f99da970e9998cd1900b24e724e8678ee11c26b965919ae4" gracePeriod=30 Feb 16 11:32:04 crc kubenswrapper[4949]: I0216 11:32:04.957363 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="a5b65741-25f3-43db-a544-85997388cfea" containerName="glance-httpd" containerID="cri-o://69c4717876345652ee32a203179fde6dd840ec23dd97af04f5ce8ed8f39031cd" gracePeriod=30 Feb 16 11:32:05 crc kubenswrapper[4949]: I0216 11:32:05.045025 4949 generic.go:334] "Generic (PLEG): container finished" podID="9d6c2dd5-e12e-427c-a512-496546dcc309" containerID="9e9b411057fa5df36bcba1a5fca2a9acab4e8e10294b089e32f06a10388330d9" exitCode=0 Feb 16 11:32:05 crc kubenswrapper[4949]: I0216 11:32:05.045114 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sf64k" event={"ID":"9d6c2dd5-e12e-427c-a512-496546dcc309","Type":"ContainerDied","Data":"9e9b411057fa5df36bcba1a5fca2a9acab4e8e10294b089e32f06a10388330d9"} Feb 16 11:32:05 crc kubenswrapper[4949]: I0216 11:32:05.045155 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sf64k" event={"ID":"9d6c2dd5-e12e-427c-a512-496546dcc309","Type":"ContainerStarted","Data":"3ea23b8033f9c896fa9756b5106fc16d9390297354c4ac22190db49ee656de7e"} Feb 16 11:32:05 crc kubenswrapper[4949]: I0216 11:32:05.264674 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="74cdae97-ce6a-4653-a84d-6f46d9795fbb" 
path="/var/lib/kubelet/pods/74cdae97-ce6a-4653-a84d-6f46d9795fbb/volumes" Feb 16 11:32:05 crc kubenswrapper[4949]: I0216 11:32:05.265604 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cbb85a9a-237f-474a-b7d4-95f06bc44724" path="/var/lib/kubelet/pods/cbb85a9a-237f-474a-b7d4-95f06bc44724/volumes" Feb 16 11:32:05 crc kubenswrapper[4949]: I0216 11:32:05.266412 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d67acfe2-211f-45ab-b9ef-de039be62b4f" path="/var/lib/kubelet/pods/d67acfe2-211f-45ab-b9ef-de039be62b4f/volumes" Feb 16 11:32:05 crc kubenswrapper[4949]: I0216 11:32:05.663571 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5784cf869f-f2c9s" Feb 16 11:32:05 crc kubenswrapper[4949]: I0216 11:32:05.729938 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2345d1ab-b1c1-4959-a5ac-5da97eda36e3-ovsdbserver-sb\") pod \"2345d1ab-b1c1-4959-a5ac-5da97eda36e3\" (UID: \"2345d1ab-b1c1-4959-a5ac-5da97eda36e3\") " Feb 16 11:32:05 crc kubenswrapper[4949]: I0216 11:32:05.730094 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2345d1ab-b1c1-4959-a5ac-5da97eda36e3-ovsdbserver-nb\") pod \"2345d1ab-b1c1-4959-a5ac-5da97eda36e3\" (UID: \"2345d1ab-b1c1-4959-a5ac-5da97eda36e3\") " Feb 16 11:32:05 crc kubenswrapper[4949]: I0216 11:32:05.730254 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2345d1ab-b1c1-4959-a5ac-5da97eda36e3-dns-swift-storage-0\") pod \"2345d1ab-b1c1-4959-a5ac-5da97eda36e3\" (UID: \"2345d1ab-b1c1-4959-a5ac-5da97eda36e3\") " Feb 16 11:32:05 crc kubenswrapper[4949]: I0216 11:32:05.730360 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xrhl4\" (UniqueName: \"kubernetes.io/projected/2345d1ab-b1c1-4959-a5ac-5da97eda36e3-kube-api-access-xrhl4\") pod \"2345d1ab-b1c1-4959-a5ac-5da97eda36e3\" (UID: \"2345d1ab-b1c1-4959-a5ac-5da97eda36e3\") " Feb 16 11:32:05 crc kubenswrapper[4949]: I0216 11:32:05.730455 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2345d1ab-b1c1-4959-a5ac-5da97eda36e3-dns-svc\") pod \"2345d1ab-b1c1-4959-a5ac-5da97eda36e3\" (UID: \"2345d1ab-b1c1-4959-a5ac-5da97eda36e3\") " Feb 16 11:32:05 crc kubenswrapper[4949]: I0216 11:32:05.730546 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2345d1ab-b1c1-4959-a5ac-5da97eda36e3-config\") pod \"2345d1ab-b1c1-4959-a5ac-5da97eda36e3\" (UID: \"2345d1ab-b1c1-4959-a5ac-5da97eda36e3\") " Feb 16 11:32:05 crc kubenswrapper[4949]: I0216 11:32:05.762995 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2345d1ab-b1c1-4959-a5ac-5da97eda36e3-kube-api-access-xrhl4" (OuterVolumeSpecName: "kube-api-access-xrhl4") pod "2345d1ab-b1c1-4959-a5ac-5da97eda36e3" (UID: "2345d1ab-b1c1-4959-a5ac-5da97eda36e3"). InnerVolumeSpecName "kube-api-access-xrhl4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:32:05 crc kubenswrapper[4949]: I0216 11:32:05.833916 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2345d1ab-b1c1-4959-a5ac-5da97eda36e3-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2345d1ab-b1c1-4959-a5ac-5da97eda36e3" (UID: "2345d1ab-b1c1-4959-a5ac-5da97eda36e3"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:32:05 crc kubenswrapper[4949]: I0216 11:32:05.836244 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xrhl4\" (UniqueName: \"kubernetes.io/projected/2345d1ab-b1c1-4959-a5ac-5da97eda36e3-kube-api-access-xrhl4\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:05 crc kubenswrapper[4949]: I0216 11:32:05.836280 4949 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2345d1ab-b1c1-4959-a5ac-5da97eda36e3-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:05 crc kubenswrapper[4949]: I0216 11:32:05.848846 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2345d1ab-b1c1-4959-a5ac-5da97eda36e3-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "2345d1ab-b1c1-4959-a5ac-5da97eda36e3" (UID: "2345d1ab-b1c1-4959-a5ac-5da97eda36e3"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:32:05 crc kubenswrapper[4949]: I0216 11:32:05.862442 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2345d1ab-b1c1-4959-a5ac-5da97eda36e3-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "2345d1ab-b1c1-4959-a5ac-5da97eda36e3" (UID: "2345d1ab-b1c1-4959-a5ac-5da97eda36e3"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:32:05 crc kubenswrapper[4949]: I0216 11:32:05.872013 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2345d1ab-b1c1-4959-a5ac-5da97eda36e3-config" (OuterVolumeSpecName: "config") pod "2345d1ab-b1c1-4959-a5ac-5da97eda36e3" (UID: "2345d1ab-b1c1-4959-a5ac-5da97eda36e3"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:32:05 crc kubenswrapper[4949]: I0216 11:32:05.885680 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2345d1ab-b1c1-4959-a5ac-5da97eda36e3-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "2345d1ab-b1c1-4959-a5ac-5da97eda36e3" (UID: "2345d1ab-b1c1-4959-a5ac-5da97eda36e3"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:32:05 crc kubenswrapper[4949]: I0216 11:32:05.939282 4949 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2345d1ab-b1c1-4959-a5ac-5da97eda36e3-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:05 crc kubenswrapper[4949]: I0216 11:32:05.939333 4949 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2345d1ab-b1c1-4959-a5ac-5da97eda36e3-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:05 crc kubenswrapper[4949]: I0216 11:32:05.939348 4949 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2345d1ab-b1c1-4959-a5ac-5da97eda36e3-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:05 crc kubenswrapper[4949]: I0216 11:32:05.939362 4949 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2345d1ab-b1c1-4959-a5ac-5da97eda36e3-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:06 crc kubenswrapper[4949]: I0216 11:32:06.067007 4949 generic.go:334] "Generic (PLEG): container finished" podID="a5b65741-25f3-43db-a544-85997388cfea" containerID="6e14183673e66ee7f99da970e9998cd1900b24e724e8678ee11c26b965919ae4" exitCode=143 Feb 16 11:32:06 crc kubenswrapper[4949]: I0216 11:32:06.067116 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a5b65741-25f3-43db-a544-85997388cfea","Type":"ContainerDied","Data":"6e14183673e66ee7f99da970e9998cd1900b24e724e8678ee11c26b965919ae4"} Feb 16 11:32:06 crc kubenswrapper[4949]: I0216 11:32:06.073097 4949 generic.go:334] "Generic (PLEG): container finished" podID="2345d1ab-b1c1-4959-a5ac-5da97eda36e3" containerID="1f0446c6bed40a5d7d999c32d0f5d94b7d2ae5ff811cb354123c471f37a736cc" exitCode=0 Feb 16 11:32:06 crc kubenswrapper[4949]: I0216 11:32:06.073191 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5784cf869f-f2c9s" Feb 16 11:32:06 crc kubenswrapper[4949]: I0216 11:32:06.073207 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5784cf869f-f2c9s" event={"ID":"2345d1ab-b1c1-4959-a5ac-5da97eda36e3","Type":"ContainerDied","Data":"1f0446c6bed40a5d7d999c32d0f5d94b7d2ae5ff811cb354123c471f37a736cc"} Feb 16 11:32:06 crc kubenswrapper[4949]: I0216 11:32:06.073775 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5784cf869f-f2c9s" event={"ID":"2345d1ab-b1c1-4959-a5ac-5da97eda36e3","Type":"ContainerDied","Data":"c5c568ce011540a2f01f66505ed694652acb79a94ce6b1e34edf3205b0512c34"} Feb 16 11:32:06 crc kubenswrapper[4949]: I0216 11:32:06.073807 4949 scope.go:117] "RemoveContainer" containerID="1f0446c6bed40a5d7d999c32d0f5d94b7d2ae5ff811cb354123c471f37a736cc" Feb 16 11:32:06 crc kubenswrapper[4949]: I0216 11:32:06.104046 4949 scope.go:117] "RemoveContainer" containerID="fd38d618837ad072ec0a2b8c4d1adf6501103e4920b337bef23531dea20399c2" Feb 16 11:32:06 crc kubenswrapper[4949]: I0216 11:32:06.130299 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-f2c9s"] Feb 16 11:32:06 crc kubenswrapper[4949]: I0216 11:32:06.145522 4949 scope.go:117] "RemoveContainer" containerID="1f0446c6bed40a5d7d999c32d0f5d94b7d2ae5ff811cb354123c471f37a736cc" Feb 16 11:32:06 crc kubenswrapper[4949]: E0216 11:32:06.147182 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1f0446c6bed40a5d7d999c32d0f5d94b7d2ae5ff811cb354123c471f37a736cc\": container with ID starting with 1f0446c6bed40a5d7d999c32d0f5d94b7d2ae5ff811cb354123c471f37a736cc not found: ID does not exist" containerID="1f0446c6bed40a5d7d999c32d0f5d94b7d2ae5ff811cb354123c471f37a736cc" Feb 16 11:32:06 crc kubenswrapper[4949]: I0216 11:32:06.147228 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1f0446c6bed40a5d7d999c32d0f5d94b7d2ae5ff811cb354123c471f37a736cc"} err="failed to get container status \"1f0446c6bed40a5d7d999c32d0f5d94b7d2ae5ff811cb354123c471f37a736cc\": rpc error: code = NotFound desc = could not find container \"1f0446c6bed40a5d7d999c32d0f5d94b7d2ae5ff811cb354123c471f37a736cc\": container with ID starting with 1f0446c6bed40a5d7d999c32d0f5d94b7d2ae5ff811cb354123c471f37a736cc not found: ID does not exist" Feb 16 11:32:06 crc kubenswrapper[4949]: I0216 11:32:06.147265 4949 scope.go:117] "RemoveContainer" containerID="fd38d618837ad072ec0a2b8c4d1adf6501103e4920b337bef23531dea20399c2" Feb 16 11:32:06 crc kubenswrapper[4949]: E0216 11:32:06.148287 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fd38d618837ad072ec0a2b8c4d1adf6501103e4920b337bef23531dea20399c2\": container with ID starting with fd38d618837ad072ec0a2b8c4d1adf6501103e4920b337bef23531dea20399c2 not found: ID does not exist" containerID="fd38d618837ad072ec0a2b8c4d1adf6501103e4920b337bef23531dea20399c2" Feb 16 11:32:06 crc kubenswrapper[4949]: I0216 11:32:06.148316 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fd38d618837ad072ec0a2b8c4d1adf6501103e4920b337bef23531dea20399c2"} err="failed to get container status \"fd38d618837ad072ec0a2b8c4d1adf6501103e4920b337bef23531dea20399c2\": rpc error: code = NotFound desc = could not find container 
\"fd38d618837ad072ec0a2b8c4d1adf6501103e4920b337bef23531dea20399c2\": container with ID starting with fd38d618837ad072ec0a2b8c4d1adf6501103e4920b337bef23531dea20399c2 not found: ID does not exist" Feb 16 11:32:06 crc kubenswrapper[4949]: I0216 11:32:06.150131 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-f2c9s"] Feb 16 11:32:06 crc kubenswrapper[4949]: I0216 11:32:06.513606 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 16 11:32:06 crc kubenswrapper[4949]: I0216 11:32:06.513972 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="495daabb-a227-4235-ace3-6caae6936da4" containerName="glance-log" containerID="cri-o://f3bf7647a6b8f333884c019d6fe52e91fe985607e23774e732324644cc5a338e" gracePeriod=30 Feb 16 11:32:06 crc kubenswrapper[4949]: I0216 11:32:06.514088 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="495daabb-a227-4235-ace3-6caae6936da4" containerName="glance-httpd" containerID="cri-o://df915a5fb326ecb825acc3a5561fc8c91a49c4de269d1de2e19b6f78cd7f8925" gracePeriod=30 Feb 16 11:32:06 crc kubenswrapper[4949]: I0216 11:32:06.748801 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-5d4bdcc77b-tmvps" Feb 16 11:32:06 crc kubenswrapper[4949]: I0216 11:32:06.748881 4949 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-api-5d4bdcc77b-tmvps" Feb 16 11:32:06 crc kubenswrapper[4949]: I0216 11:32:06.749961 4949 scope.go:117] "RemoveContainer" containerID="32b045667e36f74a7779b74a962feda63727f099cad34d3cb0b14940de390174" Feb 16 11:32:06 crc kubenswrapper[4949]: E0216 11:32:06.750563 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-5d4bdcc77b-tmvps_openstack(c53f95c1-224b-4026-9e51-9a2677621865)\"" pod="openstack/heat-api-5d4bdcc77b-tmvps" podUID="c53f95c1-224b-4026-9e51-9a2677621865" Feb 16 11:32:06 crc kubenswrapper[4949]: I0216 11:32:06.802589 4949 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-cfnapi-5489d8946b-q8mgt" Feb 16 11:32:06 crc kubenswrapper[4949]: I0216 11:32:06.803813 4949 scope.go:117] "RemoveContainer" containerID="d3fb4ce85ec36b9715e39d392f106a95a1d269d6347c50b0e741b83a54fa5188" Feb 16 11:32:06 crc kubenswrapper[4949]: E0216 11:32:06.804203 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-5489d8946b-q8mgt_openstack(e4998389-d4f4-44f0-b048-988b96e27acc)\"" pod="openstack/heat-cfnapi-5489d8946b-q8mgt" podUID="e4998389-d4f4-44f0-b048-988b96e27acc" Feb 16 11:32:07 crc kubenswrapper[4949]: I0216 11:32:07.106205 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sf64k" event={"ID":"9d6c2dd5-e12e-427c-a512-496546dcc309","Type":"ContainerStarted","Data":"b67ef0a92aaef3c32167d649bd039fd8202f797ee17cfa63afe58f721a8bb2f6"} Feb 16 11:32:07 crc kubenswrapper[4949]: I0216 11:32:07.113511 4949 generic.go:334] "Generic (PLEG): container finished" podID="495daabb-a227-4235-ace3-6caae6936da4" containerID="f3bf7647a6b8f333884c019d6fe52e91fe985607e23774e732324644cc5a338e" 
Feb 16 11:32:07 crc kubenswrapper[4949]: I0216 11:32:07.113570 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"495daabb-a227-4235-ace3-6caae6936da4","Type":"ContainerDied","Data":"f3bf7647a6b8f333884c019d6fe52e91fe985607e23774e732324644cc5a338e"}
Feb 16 11:32:07 crc kubenswrapper[4949]: I0216 11:32:07.252104 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2345d1ab-b1c1-4959-a5ac-5da97eda36e3" path="/var/lib/kubelet/pods/2345d1ab-b1c1-4959-a5ac-5da97eda36e3/volumes"
Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.131009 4949 generic.go:334] "Generic (PLEG): container finished" podID="9d6c2dd5-e12e-427c-a512-496546dcc309" containerID="b67ef0a92aaef3c32167d649bd039fd8202f797ee17cfa63afe58f721a8bb2f6" exitCode=0
Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.131107 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sf64k" event={"ID":"9d6c2dd5-e12e-427c-a512-496546dcc309","Type":"ContainerDied","Data":"b67ef0a92aaef3c32167d649bd039fd8202f797ee17cfa63afe58f721a8bb2f6"}
Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.166443 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-9jxjt"]
Feb 16 11:32:08 crc kubenswrapper[4949]: E0216 11:32:08.167362 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74cdae97-ce6a-4653-a84d-6f46d9795fbb" containerName="placement-api"
Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.167382 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="74cdae97-ce6a-4653-a84d-6f46d9795fbb" containerName="placement-api"
Feb 16 11:32:08 crc kubenswrapper[4949]: E0216 11:32:08.167419 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74cdae97-ce6a-4653-a84d-6f46d9795fbb" containerName="placement-log"
Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.167426 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="74cdae97-ce6a-4653-a84d-6f46d9795fbb" containerName="placement-log"
Feb 16 11:32:08 crc kubenswrapper[4949]: E0216 11:32:08.167448 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cbb85a9a-237f-474a-b7d4-95f06bc44724" containerName="heat-cfnapi"
Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.167455 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="cbb85a9a-237f-474a-b7d4-95f06bc44724" containerName="heat-cfnapi"
Feb 16 11:32:08 crc kubenswrapper[4949]: E0216 11:32:08.167466 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2345d1ab-b1c1-4959-a5ac-5da97eda36e3" containerName="init"
Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.168404 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="2345d1ab-b1c1-4959-a5ac-5da97eda36e3" containerName="init"
Feb 16 11:32:08 crc kubenswrapper[4949]: E0216 11:32:08.168448 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d67acfe2-211f-45ab-b9ef-de039be62b4f" containerName="heat-api"
Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.168456 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="d67acfe2-211f-45ab-b9ef-de039be62b4f" containerName="heat-api"
Feb 16 11:32:08 crc kubenswrapper[4949]: E0216 11:32:08.168468 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2345d1ab-b1c1-4959-a5ac-5da97eda36e3" containerName="dnsmasq-dns"
Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.168474 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="2345d1ab-b1c1-4959-a5ac-5da97eda36e3" containerName="dnsmasq-dns"
Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.168747 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="74cdae97-ce6a-4653-a84d-6f46d9795fbb" containerName="placement-api"
Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.168769 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="2345d1ab-b1c1-4959-a5ac-5da97eda36e3" containerName="dnsmasq-dns"
Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.168789 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="74cdae97-ce6a-4653-a84d-6f46d9795fbb" containerName="placement-log"
Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.168805 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="d67acfe2-211f-45ab-b9ef-de039be62b4f" containerName="heat-api"
Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.168824 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="cbb85a9a-237f-474a-b7d4-95f06bc44724" containerName="heat-cfnapi"
Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.170761 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-9jxjt"
Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.182027 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-external-api-0" podUID="a5b65741-25f3-43db-a544-85997388cfea" containerName="glance-log" probeResult="failure" output="Get \"https://10.217.0.191:9292/healthcheck\": read tcp 10.217.0.2:41498->10.217.0.191:9292: read: connection reset by peer"
Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.184723 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-external-api-0" podUID="a5b65741-25f3-43db-a544-85997388cfea" containerName="glance-httpd" probeResult="failure" output="Get \"https://10.217.0.191:9292/healthcheck\": read tcp 10.217.0.2:41486->10.217.0.191:9292: read: connection reset by peer"
Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.205832 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-9jxjt"]
Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.238228 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-575a-account-create-update-v2lxv"]
Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.240023 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-575a-account-create-update-v2lxv"
Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.245439 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret"
Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.285247 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-575a-account-create-update-v2lxv"]
Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.299224 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-7czt8"]
Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.301843 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-7czt8"
Need to start a new one" pod="openstack/nova-cell0-db-create-7czt8" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.308600 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/064b7f6b-378a-4e2a-a3e9-1dd28e6b2a88-operator-scripts\") pod \"nova-api-db-create-9jxjt\" (UID: \"064b7f6b-378a-4e2a-a3e9-1dd28e6b2a88\") " pod="openstack/nova-api-db-create-9jxjt" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.309581 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6gnc4\" (UniqueName: \"kubernetes.io/projected/2394da0b-9528-4d78-9fca-bf224b27ad5b-kube-api-access-6gnc4\") pod \"nova-api-575a-account-create-update-v2lxv\" (UID: \"2394da0b-9528-4d78-9fca-bf224b27ad5b\") " pod="openstack/nova-api-575a-account-create-update-v2lxv" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.309732 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2394da0b-9528-4d78-9fca-bf224b27ad5b-operator-scripts\") pod \"nova-api-575a-account-create-update-v2lxv\" (UID: \"2394da0b-9528-4d78-9fca-bf224b27ad5b\") " pod="openstack/nova-api-575a-account-create-update-v2lxv" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.309794 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-69sxz\" (UniqueName: \"kubernetes.io/projected/064b7f6b-378a-4e2a-a3e9-1dd28e6b2a88-kube-api-access-69sxz\") pod \"nova-api-db-create-9jxjt\" (UID: \"064b7f6b-378a-4e2a-a3e9-1dd28e6b2a88\") " pod="openstack/nova-api-db-create-9jxjt" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.320009 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-7czt8"] Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.437399 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6gnc4\" (UniqueName: \"kubernetes.io/projected/2394da0b-9528-4d78-9fca-bf224b27ad5b-kube-api-access-6gnc4\") pod \"nova-api-575a-account-create-update-v2lxv\" (UID: \"2394da0b-9528-4d78-9fca-bf224b27ad5b\") " pod="openstack/nova-api-575a-account-create-update-v2lxv" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.437508 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c894b203-cec9-49e4-9b92-bdf185ad11fa-operator-scripts\") pod \"nova-cell0-db-create-7czt8\" (UID: \"c894b203-cec9-49e4-9b92-bdf185ad11fa\") " pod="openstack/nova-cell0-db-create-7czt8" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.437667 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2394da0b-9528-4d78-9fca-bf224b27ad5b-operator-scripts\") pod \"nova-api-575a-account-create-update-v2lxv\" (UID: \"2394da0b-9528-4d78-9fca-bf224b27ad5b\") " pod="openstack/nova-api-575a-account-create-update-v2lxv" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.437741 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-69sxz\" (UniqueName: \"kubernetes.io/projected/064b7f6b-378a-4e2a-a3e9-1dd28e6b2a88-kube-api-access-69sxz\") pod \"nova-api-db-create-9jxjt\" (UID: \"064b7f6b-378a-4e2a-a3e9-1dd28e6b2a88\") " 
pod="openstack/nova-api-db-create-9jxjt" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.437888 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h548g\" (UniqueName: \"kubernetes.io/projected/c894b203-cec9-49e4-9b92-bdf185ad11fa-kube-api-access-h548g\") pod \"nova-cell0-db-create-7czt8\" (UID: \"c894b203-cec9-49e4-9b92-bdf185ad11fa\") " pod="openstack/nova-cell0-db-create-7czt8" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.445002 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/064b7f6b-378a-4e2a-a3e9-1dd28e6b2a88-operator-scripts\") pod \"nova-api-db-create-9jxjt\" (UID: \"064b7f6b-378a-4e2a-a3e9-1dd28e6b2a88\") " pod="openstack/nova-api-db-create-9jxjt" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.446295 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/064b7f6b-378a-4e2a-a3e9-1dd28e6b2a88-operator-scripts\") pod \"nova-api-db-create-9jxjt\" (UID: \"064b7f6b-378a-4e2a-a3e9-1dd28e6b2a88\") " pod="openstack/nova-api-db-create-9jxjt" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.447159 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2394da0b-9528-4d78-9fca-bf224b27ad5b-operator-scripts\") pod \"nova-api-575a-account-create-update-v2lxv\" (UID: \"2394da0b-9528-4d78-9fca-bf224b27ad5b\") " pod="openstack/nova-api-575a-account-create-update-v2lxv" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.449373 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-8lkkq"] Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.451723 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-8lkkq" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.488362 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6gnc4\" (UniqueName: \"kubernetes.io/projected/2394da0b-9528-4d78-9fca-bf224b27ad5b-kube-api-access-6gnc4\") pod \"nova-api-575a-account-create-update-v2lxv\" (UID: \"2394da0b-9528-4d78-9fca-bf224b27ad5b\") " pod="openstack/nova-api-575a-account-create-update-v2lxv" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.490209 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-69sxz\" (UniqueName: \"kubernetes.io/projected/064b7f6b-378a-4e2a-a3e9-1dd28e6b2a88-kube-api-access-69sxz\") pod \"nova-api-db-create-9jxjt\" (UID: \"064b7f6b-378a-4e2a-a3e9-1dd28e6b2a88\") " pod="openstack/nova-api-db-create-9jxjt" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.498359 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-a249-account-create-update-9lm5t"] Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.498883 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-9jxjt" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.499972 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-a249-account-create-update-9lm5t" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.510587 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.523626 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-a249-account-create-update-9lm5t"] Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.546610 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-8lkkq"] Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.548430 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c894b203-cec9-49e4-9b92-bdf185ad11fa-operator-scripts\") pod \"nova-cell0-db-create-7czt8\" (UID: \"c894b203-cec9-49e4-9b92-bdf185ad11fa\") " pod="openstack/nova-cell0-db-create-7czt8" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.548491 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6wl4j\" (UniqueName: \"kubernetes.io/projected/ead190f2-2e52-47d7-a1d1-9d4f19c83e5d-kube-api-access-6wl4j\") pod \"nova-cell1-db-create-8lkkq\" (UID: \"ead190f2-2e52-47d7-a1d1-9d4f19c83e5d\") " pod="openstack/nova-cell1-db-create-8lkkq" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.548584 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h548g\" (UniqueName: \"kubernetes.io/projected/c894b203-cec9-49e4-9b92-bdf185ad11fa-kube-api-access-h548g\") pod \"nova-cell0-db-create-7czt8\" (UID: \"c894b203-cec9-49e4-9b92-bdf185ad11fa\") " pod="openstack/nova-cell0-db-create-7czt8" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.548685 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ead190f2-2e52-47d7-a1d1-9d4f19c83e5d-operator-scripts\") pod \"nova-cell1-db-create-8lkkq\" (UID: \"ead190f2-2e52-47d7-a1d1-9d4f19c83e5d\") " pod="openstack/nova-cell1-db-create-8lkkq" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.549810 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c894b203-cec9-49e4-9b92-bdf185ad11fa-operator-scripts\") pod \"nova-cell0-db-create-7czt8\" (UID: \"c894b203-cec9-49e4-9b92-bdf185ad11fa\") " pod="openstack/nova-cell0-db-create-7czt8" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.564277 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-g6wsf"] Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.567131 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-g6wsf" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.589612 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h548g\" (UniqueName: \"kubernetes.io/projected/c894b203-cec9-49e4-9b92-bdf185ad11fa-kube-api-access-h548g\") pod \"nova-cell0-db-create-7czt8\" (UID: \"c894b203-cec9-49e4-9b92-bdf185ad11fa\") " pod="openstack/nova-cell0-db-create-7czt8" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.608256 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-g6wsf"] Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.633393 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-581e-account-create-update-hnv29"] Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.635943 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-581e-account-create-update-hnv29" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.642206 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.648427 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-581e-account-create-update-hnv29"] Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.656202 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pkrj8\" (UniqueName: \"kubernetes.io/projected/d0913052-0fd5-45bf-a268-47dc86f7af86-kube-api-access-pkrj8\") pod \"nova-cell0-a249-account-create-update-9lm5t\" (UID: \"d0913052-0fd5-45bf-a268-47dc86f7af86\") " pod="openstack/nova-cell0-a249-account-create-update-9lm5t" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.656296 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ntrkl\" (UniqueName: \"kubernetes.io/projected/91104758-7e02-4761-bc39-fbca029cda0f-kube-api-access-ntrkl\") pod \"community-operators-g6wsf\" (UID: \"91104758-7e02-4761-bc39-fbca029cda0f\") " pod="openshift-marketplace/community-operators-g6wsf" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.656577 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ead190f2-2e52-47d7-a1d1-9d4f19c83e5d-operator-scripts\") pod \"nova-cell1-db-create-8lkkq\" (UID: \"ead190f2-2e52-47d7-a1d1-9d4f19c83e5d\") " pod="openstack/nova-cell1-db-create-8lkkq" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.656764 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/91104758-7e02-4761-bc39-fbca029cda0f-utilities\") pod \"community-operators-g6wsf\" (UID: \"91104758-7e02-4761-bc39-fbca029cda0f\") " pod="openshift-marketplace/community-operators-g6wsf" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.656822 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6wl4j\" (UniqueName: \"kubernetes.io/projected/ead190f2-2e52-47d7-a1d1-9d4f19c83e5d-kube-api-access-6wl4j\") pod \"nova-cell1-db-create-8lkkq\" (UID: \"ead190f2-2e52-47d7-a1d1-9d4f19c83e5d\") " pod="openstack/nova-cell1-db-create-8lkkq" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.656844 4949 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d0913052-0fd5-45bf-a268-47dc86f7af86-operator-scripts\") pod \"nova-cell0-a249-account-create-update-9lm5t\" (UID: \"d0913052-0fd5-45bf-a268-47dc86f7af86\") " pod="openstack/nova-cell0-a249-account-create-update-9lm5t" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.656875 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/91104758-7e02-4761-bc39-fbca029cda0f-catalog-content\") pod \"community-operators-g6wsf\" (UID: \"91104758-7e02-4761-bc39-fbca029cda0f\") " pod="openshift-marketplace/community-operators-g6wsf" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.678862 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ead190f2-2e52-47d7-a1d1-9d4f19c83e5d-operator-scripts\") pod \"nova-cell1-db-create-8lkkq\" (UID: \"ead190f2-2e52-47d7-a1d1-9d4f19c83e5d\") " pod="openstack/nova-cell1-db-create-8lkkq" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.737070 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6wl4j\" (UniqueName: \"kubernetes.io/projected/ead190f2-2e52-47d7-a1d1-9d4f19c83e5d-kube-api-access-6wl4j\") pod \"nova-cell1-db-create-8lkkq\" (UID: \"ead190f2-2e52-47d7-a1d1-9d4f19c83e5d\") " pod="openstack/nova-cell1-db-create-8lkkq" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.749993 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-575a-account-create-update-v2lxv" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.759797 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d0913052-0fd5-45bf-a268-47dc86f7af86-operator-scripts\") pod \"nova-cell0-a249-account-create-update-9lm5t\" (UID: \"d0913052-0fd5-45bf-a268-47dc86f7af86\") " pod="openstack/nova-cell0-a249-account-create-update-9lm5t" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.772394 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/91104758-7e02-4761-bc39-fbca029cda0f-catalog-content\") pod \"community-operators-g6wsf\" (UID: \"91104758-7e02-4761-bc39-fbca029cda0f\") " pod="openshift-marketplace/community-operators-g6wsf" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.772910 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pkrj8\" (UniqueName: \"kubernetes.io/projected/d0913052-0fd5-45bf-a268-47dc86f7af86-kube-api-access-pkrj8\") pod \"nova-cell0-a249-account-create-update-9lm5t\" (UID: \"d0913052-0fd5-45bf-a268-47dc86f7af86\") " pod="openstack/nova-cell0-a249-account-create-update-9lm5t" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.773000 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/91104758-7e02-4761-bc39-fbca029cda0f-catalog-content\") pod \"community-operators-g6wsf\" (UID: \"91104758-7e02-4761-bc39-fbca029cda0f\") " pod="openshift-marketplace/community-operators-g6wsf" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.773236 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ntrkl\" 
(UniqueName: \"kubernetes.io/projected/91104758-7e02-4761-bc39-fbca029cda0f-kube-api-access-ntrkl\") pod \"community-operators-g6wsf\" (UID: \"91104758-7e02-4761-bc39-fbca029cda0f\") " pod="openshift-marketplace/community-operators-g6wsf" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.773431 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w5dd2\" (UniqueName: \"kubernetes.io/projected/80642c6c-aa06-4c35-ab96-3c2114889ee5-kube-api-access-w5dd2\") pod \"nova-cell1-581e-account-create-update-hnv29\" (UID: \"80642c6c-aa06-4c35-ab96-3c2114889ee5\") " pod="openstack/nova-cell1-581e-account-create-update-hnv29" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.773914 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/91104758-7e02-4761-bc39-fbca029cda0f-utilities\") pod \"community-operators-g6wsf\" (UID: \"91104758-7e02-4761-bc39-fbca029cda0f\") " pod="openshift-marketplace/community-operators-g6wsf" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.774543 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/80642c6c-aa06-4c35-ab96-3c2114889ee5-operator-scripts\") pod \"nova-cell1-581e-account-create-update-hnv29\" (UID: \"80642c6c-aa06-4c35-ab96-3c2114889ee5\") " pod="openstack/nova-cell1-581e-account-create-update-hnv29" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.775141 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/91104758-7e02-4761-bc39-fbca029cda0f-utilities\") pod \"community-operators-g6wsf\" (UID: \"91104758-7e02-4761-bc39-fbca029cda0f\") " pod="openshift-marketplace/community-operators-g6wsf" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.772461 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d0913052-0fd5-45bf-a268-47dc86f7af86-operator-scripts\") pod \"nova-cell0-a249-account-create-update-9lm5t\" (UID: \"d0913052-0fd5-45bf-a268-47dc86f7af86\") " pod="openstack/nova-cell0-a249-account-create-update-9lm5t" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.783105 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-7czt8" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.839750 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pkrj8\" (UniqueName: \"kubernetes.io/projected/d0913052-0fd5-45bf-a268-47dc86f7af86-kube-api-access-pkrj8\") pod \"nova-cell0-a249-account-create-update-9lm5t\" (UID: \"d0913052-0fd5-45bf-a268-47dc86f7af86\") " pod="openstack/nova-cell0-a249-account-create-update-9lm5t" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.851093 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ntrkl\" (UniqueName: \"kubernetes.io/projected/91104758-7e02-4761-bc39-fbca029cda0f-kube-api-access-ntrkl\") pod \"community-operators-g6wsf\" (UID: \"91104758-7e02-4761-bc39-fbca029cda0f\") " pod="openshift-marketplace/community-operators-g6wsf" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.857000 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-a249-account-create-update-9lm5t" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.885109 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w5dd2\" (UniqueName: \"kubernetes.io/projected/80642c6c-aa06-4c35-ab96-3c2114889ee5-kube-api-access-w5dd2\") pod \"nova-cell1-581e-account-create-update-hnv29\" (UID: \"80642c6c-aa06-4c35-ab96-3c2114889ee5\") " pod="openstack/nova-cell1-581e-account-create-update-hnv29" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.885526 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-g6wsf" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.888061 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/80642c6c-aa06-4c35-ab96-3c2114889ee5-operator-scripts\") pod \"nova-cell1-581e-account-create-update-hnv29\" (UID: \"80642c6c-aa06-4c35-ab96-3c2114889ee5\") " pod="openstack/nova-cell1-581e-account-create-update-hnv29" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.897297 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/80642c6c-aa06-4c35-ab96-3c2114889ee5-operator-scripts\") pod \"nova-cell1-581e-account-create-update-hnv29\" (UID: \"80642c6c-aa06-4c35-ab96-3c2114889ee5\") " pod="openstack/nova-cell1-581e-account-create-update-hnv29" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.921692 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w5dd2\" (UniqueName: \"kubernetes.io/projected/80642c6c-aa06-4c35-ab96-3c2114889ee5-kube-api-access-w5dd2\") pod \"nova-cell1-581e-account-create-update-hnv29\" (UID: \"80642c6c-aa06-4c35-ab96-3c2114889ee5\") " pod="openstack/nova-cell1-581e-account-create-update-hnv29" Feb 16 11:32:08 crc kubenswrapper[4949]: I0216 11:32:08.924604 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-581e-account-create-update-hnv29" Feb 16 11:32:09 crc kubenswrapper[4949]: I0216 11:32:09.024427 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-8lkkq" Feb 16 11:32:09 crc kubenswrapper[4949]: I0216 11:32:09.298423 4949 generic.go:334] "Generic (PLEG): container finished" podID="a5b65741-25f3-43db-a544-85997388cfea" containerID="69c4717876345652ee32a203179fde6dd840ec23dd97af04f5ce8ed8f39031cd" exitCode=0 Feb 16 11:32:09 crc kubenswrapper[4949]: I0216 11:32:09.298741 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a5b65741-25f3-43db-a544-85997388cfea","Type":"ContainerDied","Data":"69c4717876345652ee32a203179fde6dd840ec23dd97af04f5ce8ed8f39031cd"} Feb 16 11:32:09 crc kubenswrapper[4949]: I0216 11:32:09.367348 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sf64k" event={"ID":"9d6c2dd5-e12e-427c-a512-496546dcc309","Type":"ContainerStarted","Data":"fe561fba57e47115c1190740c6167b2185a711055ff42c2feeea2259bfee7c29"} Feb 16 11:32:09 crc kubenswrapper[4949]: I0216 11:32:09.417913 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-sf64k" podStartSLOduration=3.852432005 podStartE2EDuration="7.41785852s" podCreationTimestamp="2026-02-16 11:32:02 +0000 UTC" firstStartedPulling="2026-02-16 11:32:05.054904312 +0000 UTC m=+1514.684238477" lastFinishedPulling="2026-02-16 11:32:08.620330827 +0000 UTC m=+1518.249664992" observedRunningTime="2026-02-16 11:32:09.397135768 +0000 UTC m=+1519.026469943" watchObservedRunningTime="2026-02-16 11:32:09.41785852 +0000 UTC m=+1519.047192685" Feb 16 11:32:09 crc kubenswrapper[4949]: I0216 11:32:09.807159 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Feb 16 11:32:09 crc kubenswrapper[4949]: I0216 11:32:09.827201 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-9jxjt"] Feb 16 11:32:09 crc kubenswrapper[4949]: I0216 11:32:09.936021 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a5b65741-25f3-43db-a544-85997388cfea-public-tls-certs\") pod \"a5b65741-25f3-43db-a544-85997388cfea\" (UID: \"a5b65741-25f3-43db-a544-85997388cfea\") " Feb 16 11:32:09 crc kubenswrapper[4949]: I0216 11:32:09.936132 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a5b65741-25f3-43db-a544-85997388cfea-httpd-run\") pod \"a5b65741-25f3-43db-a544-85997388cfea\" (UID: \"a5b65741-25f3-43db-a544-85997388cfea\") " Feb 16 11:32:09 crc kubenswrapper[4949]: I0216 11:32:09.936201 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5b65741-25f3-43db-a544-85997388cfea-combined-ca-bundle\") pod \"a5b65741-25f3-43db-a544-85997388cfea\" (UID: \"a5b65741-25f3-43db-a544-85997388cfea\") " Feb 16 11:32:09 crc kubenswrapper[4949]: I0216 11:32:09.936310 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ntpln\" (UniqueName: \"kubernetes.io/projected/a5b65741-25f3-43db-a544-85997388cfea-kube-api-access-ntpln\") pod \"a5b65741-25f3-43db-a544-85997388cfea\" (UID: \"a5b65741-25f3-43db-a544-85997388cfea\") " Feb 16 11:32:09 crc kubenswrapper[4949]: I0216 11:32:09.936353 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/a5b65741-25f3-43db-a544-85997388cfea-logs\") pod \"a5b65741-25f3-43db-a544-85997388cfea\" (UID: \"a5b65741-25f3-43db-a544-85997388cfea\") " Feb 16 11:32:09 crc kubenswrapper[4949]: I0216 11:32:09.936501 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c8fa8abd-35cd-473b-b870-e4bd2cd177cf\") pod \"a5b65741-25f3-43db-a544-85997388cfea\" (UID: \"a5b65741-25f3-43db-a544-85997388cfea\") " Feb 16 11:32:09 crc kubenswrapper[4949]: I0216 11:32:09.936646 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a5b65741-25f3-43db-a544-85997388cfea-scripts\") pod \"a5b65741-25f3-43db-a544-85997388cfea\" (UID: \"a5b65741-25f3-43db-a544-85997388cfea\") " Feb 16 11:32:09 crc kubenswrapper[4949]: I0216 11:32:09.936687 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5b65741-25f3-43db-a544-85997388cfea-config-data\") pod \"a5b65741-25f3-43db-a544-85997388cfea\" (UID: \"a5b65741-25f3-43db-a544-85997388cfea\") " Feb 16 11:32:09 crc kubenswrapper[4949]: I0216 11:32:09.938482 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a5b65741-25f3-43db-a544-85997388cfea-logs" (OuterVolumeSpecName: "logs") pod "a5b65741-25f3-43db-a544-85997388cfea" (UID: "a5b65741-25f3-43db-a544-85997388cfea"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:32:09 crc kubenswrapper[4949]: I0216 11:32:09.947835 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a5b65741-25f3-43db-a544-85997388cfea-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "a5b65741-25f3-43db-a544-85997388cfea" (UID: "a5b65741-25f3-43db-a544-85997388cfea"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:32:09 crc kubenswrapper[4949]: I0216 11:32:09.974537 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5b65741-25f3-43db-a544-85997388cfea-scripts" (OuterVolumeSpecName: "scripts") pod "a5b65741-25f3-43db-a544-85997388cfea" (UID: "a5b65741-25f3-43db-a544-85997388cfea"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:32:09 crc kubenswrapper[4949]: I0216 11:32:09.976369 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a5b65741-25f3-43db-a544-85997388cfea-kube-api-access-ntpln" (OuterVolumeSpecName: "kube-api-access-ntpln") pod "a5b65741-25f3-43db-a544-85997388cfea" (UID: "a5b65741-25f3-43db-a544-85997388cfea"). InnerVolumeSpecName "kube-api-access-ntpln". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.015595 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c8fa8abd-35cd-473b-b870-e4bd2cd177cf" (OuterVolumeSpecName: "glance") pod "a5b65741-25f3-43db-a544-85997388cfea" (UID: "a5b65741-25f3-43db-a544-85997388cfea"). InnerVolumeSpecName "pvc-c8fa8abd-35cd-473b-b870-e4bd2cd177cf". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.080322 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5b65741-25f3-43db-a544-85997388cfea-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a5b65741-25f3-43db-a544-85997388cfea" (UID: "a5b65741-25f3-43db-a544-85997388cfea"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.082119 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5b65741-25f3-43db-a544-85997388cfea-config-data" (OuterVolumeSpecName: "config-data") pod "a5b65741-25f3-43db-a544-85997388cfea" (UID: "a5b65741-25f3-43db-a544-85997388cfea"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.089772 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5b65741-25f3-43db-a544-85997388cfea-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "a5b65741-25f3-43db-a544-85997388cfea" (UID: "a5b65741-25f3-43db-a544-85997388cfea"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.098972 4949 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a5b65741-25f3-43db-a544-85997388cfea-scripts\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.099017 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5b65741-25f3-43db-a544-85997388cfea-config-data\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.099033 4949 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a5b65741-25f3-43db-a544-85997388cfea-public-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.099046 4949 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a5b65741-25f3-43db-a544-85997388cfea-httpd-run\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.099058 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5b65741-25f3-43db-a544-85997388cfea-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.099070 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ntpln\" (UniqueName: \"kubernetes.io/projected/a5b65741-25f3-43db-a544-85997388cfea-kube-api-access-ntpln\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.099086 4949 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a5b65741-25f3-43db-a544-85997388cfea-logs\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.099123 4949 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-c8fa8abd-35cd-473b-b870-e4bd2cd177cf\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c8fa8abd-35cd-473b-b870-e4bd2cd177cf\") on node \"crc\" " Feb 16 11:32:10 crc 
kubenswrapper[4949]: I0216 11:32:10.165156 4949 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.165386 4949 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-c8fa8abd-35cd-473b-b870-e4bd2cd177cf" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c8fa8abd-35cd-473b-b870-e4bd2cd177cf") on node "crc" Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.204616 4949 reconciler_common.go:293] "Volume detached for volume \"pvc-c8fa8abd-35cd-473b-b870-e4bd2cd177cf\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c8fa8abd-35cd-473b-b870-e4bd2cd177cf\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.406423 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a5b65741-25f3-43db-a544-85997388cfea","Type":"ContainerDied","Data":"a295d66316bcdebc2d40746024ffd37d2ca845aecac50535544d493ac8bb845c"} Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.406823 4949 scope.go:117] "RemoveContainer" containerID="69c4717876345652ee32a203179fde6dd840ec23dd97af04f5ce8ed8f39031cd" Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.406916 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.417630 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-9jxjt" event={"ID":"064b7f6b-378a-4e2a-a3e9-1dd28e6b2a88","Type":"ContainerStarted","Data":"3e6100b2a15e9237f1f89574d8931d9ae5825eb890973066b67067965cad1abc"} Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.428294 4949 generic.go:334] "Generic (PLEG): container finished" podID="495daabb-a227-4235-ace3-6caae6936da4" containerID="df915a5fb326ecb825acc3a5561fc8c91a49c4de269d1de2e19b6f78cd7f8925" exitCode=0 Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.431455 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"495daabb-a227-4235-ace3-6caae6936da4","Type":"ContainerDied","Data":"df915a5fb326ecb825acc3a5561fc8c91a49c4de269d1de2e19b6f78cd7f8925"} Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.470670 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-7czt8"] Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.479932 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-db-create-9jxjt" podStartSLOduration=2.479903162 podStartE2EDuration="2.479903162s" podCreationTimestamp="2026-02-16 11:32:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:32:10.450588286 +0000 UTC m=+1520.079922451" watchObservedRunningTime="2026-02-16 11:32:10.479903162 +0000 UTC m=+1520.109237327" Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.539921 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-575a-account-create-update-v2lxv"] Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.575339 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.608183 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/glance-default-external-api-0"] Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.624292 4949 scope.go:117] "RemoveContainer" containerID="6e14183673e66ee7f99da970e9998cd1900b24e724e8678ee11c26b965919ae4" Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.633070 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Feb 16 11:32:10 crc kubenswrapper[4949]: E0216 11:32:10.634158 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5b65741-25f3-43db-a544-85997388cfea" containerName="glance-httpd" Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.637268 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5b65741-25f3-43db-a544-85997388cfea" containerName="glance-httpd" Feb 16 11:32:10 crc kubenswrapper[4949]: E0216 11:32:10.637368 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5b65741-25f3-43db-a544-85997388cfea" containerName="glance-log" Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.637381 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5b65741-25f3-43db-a544-85997388cfea" containerName="glance-log" Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.637874 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="a5b65741-25f3-43db-a544-85997388cfea" containerName="glance-httpd" Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.637893 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="a5b65741-25f3-43db-a544-85997388cfea" containerName="glance-log" Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.640645 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.662363 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.662594 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.675742 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.861029 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9a6f88b-57b3-4f64-a4aa-84d0529fdf82-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"b9a6f88b-57b3-4f64-a4aa-84d0529fdf82\") " pod="openstack/glance-default-external-api-0" Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.861450 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b9a6f88b-57b3-4f64-a4aa-84d0529fdf82-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"b9a6f88b-57b3-4f64-a4aa-84d0529fdf82\") " pod="openstack/glance-default-external-api-0" Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.861566 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b9a6f88b-57b3-4f64-a4aa-84d0529fdf82-logs\") pod \"glance-default-external-api-0\" (UID: \"b9a6f88b-57b3-4f64-a4aa-84d0529fdf82\") " pod="openstack/glance-default-external-api-0" Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.861755 4949 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b9a6f88b-57b3-4f64-a4aa-84d0529fdf82-scripts\") pod \"glance-default-external-api-0\" (UID: \"b9a6f88b-57b3-4f64-a4aa-84d0529fdf82\") " pod="openstack/glance-default-external-api-0" Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.861877 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-c8fa8abd-35cd-473b-b870-e4bd2cd177cf\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c8fa8abd-35cd-473b-b870-e4bd2cd177cf\") pod \"glance-default-external-api-0\" (UID: \"b9a6f88b-57b3-4f64-a4aa-84d0529fdf82\") " pod="openstack/glance-default-external-api-0" Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.861998 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lqt9s\" (UniqueName: \"kubernetes.io/projected/b9a6f88b-57b3-4f64-a4aa-84d0529fdf82-kube-api-access-lqt9s\") pod \"glance-default-external-api-0\" (UID: \"b9a6f88b-57b3-4f64-a4aa-84d0529fdf82\") " pod="openstack/glance-default-external-api-0" Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.862091 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9a6f88b-57b3-4f64-a4aa-84d0529fdf82-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"b9a6f88b-57b3-4f64-a4aa-84d0529fdf82\") " pod="openstack/glance-default-external-api-0" Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.862251 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9a6f88b-57b3-4f64-a4aa-84d0529fdf82-config-data\") pod \"glance-default-external-api-0\" (UID: \"b9a6f88b-57b3-4f64-a4aa-84d0529fdf82\") " pod="openstack/glance-default-external-api-0" Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.965303 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9a6f88b-57b3-4f64-a4aa-84d0529fdf82-config-data\") pod \"glance-default-external-api-0\" (UID: \"b9a6f88b-57b3-4f64-a4aa-84d0529fdf82\") " pod="openstack/glance-default-external-api-0" Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.965476 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9a6f88b-57b3-4f64-a4aa-84d0529fdf82-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"b9a6f88b-57b3-4f64-a4aa-84d0529fdf82\") " pod="openstack/glance-default-external-api-0" Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.965564 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b9a6f88b-57b3-4f64-a4aa-84d0529fdf82-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"b9a6f88b-57b3-4f64-a4aa-84d0529fdf82\") " pod="openstack/glance-default-external-api-0" Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.965646 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b9a6f88b-57b3-4f64-a4aa-84d0529fdf82-logs\") pod \"glance-default-external-api-0\" (UID: \"b9a6f88b-57b3-4f64-a4aa-84d0529fdf82\") " pod="openstack/glance-default-external-api-0" Feb 16 
11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.965706 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b9a6f88b-57b3-4f64-a4aa-84d0529fdf82-scripts\") pod \"glance-default-external-api-0\" (UID: \"b9a6f88b-57b3-4f64-a4aa-84d0529fdf82\") " pod="openstack/glance-default-external-api-0" Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.965733 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-c8fa8abd-35cd-473b-b870-e4bd2cd177cf\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c8fa8abd-35cd-473b-b870-e4bd2cd177cf\") pod \"glance-default-external-api-0\" (UID: \"b9a6f88b-57b3-4f64-a4aa-84d0529fdf82\") " pod="openstack/glance-default-external-api-0" Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.965759 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lqt9s\" (UniqueName: \"kubernetes.io/projected/b9a6f88b-57b3-4f64-a4aa-84d0529fdf82-kube-api-access-lqt9s\") pod \"glance-default-external-api-0\" (UID: \"b9a6f88b-57b3-4f64-a4aa-84d0529fdf82\") " pod="openstack/glance-default-external-api-0" Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.965777 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9a6f88b-57b3-4f64-a4aa-84d0529fdf82-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"b9a6f88b-57b3-4f64-a4aa-84d0529fdf82\") " pod="openstack/glance-default-external-api-0" Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.966942 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b9a6f88b-57b3-4f64-a4aa-84d0529fdf82-logs\") pod \"glance-default-external-api-0\" (UID: \"b9a6f88b-57b3-4f64-a4aa-84d0529fdf82\") " pod="openstack/glance-default-external-api-0" Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.967489 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b9a6f88b-57b3-4f64-a4aa-84d0529fdf82-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"b9a6f88b-57b3-4f64-a4aa-84d0529fdf82\") " pod="openstack/glance-default-external-api-0" Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.975905 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9a6f88b-57b3-4f64-a4aa-84d0529fdf82-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"b9a6f88b-57b3-4f64-a4aa-84d0529fdf82\") " pod="openstack/glance-default-external-api-0" Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.981118 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b9a6f88b-57b3-4f64-a4aa-84d0529fdf82-scripts\") pod \"glance-default-external-api-0\" (UID: \"b9a6f88b-57b3-4f64-a4aa-84d0529fdf82\") " pod="openstack/glance-default-external-api-0" Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.988079 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9a6f88b-57b3-4f64-a4aa-84d0529fdf82-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"b9a6f88b-57b3-4f64-a4aa-84d0529fdf82\") " pod="openstack/glance-default-external-api-0" Feb 16 11:32:10 crc kubenswrapper[4949]: I0216 11:32:10.988645 4949 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9a6f88b-57b3-4f64-a4aa-84d0529fdf82-config-data\") pod \"glance-default-external-api-0\" (UID: \"b9a6f88b-57b3-4f64-a4aa-84d0529fdf82\") " pod="openstack/glance-default-external-api-0" Feb 16 11:32:11 crc kubenswrapper[4949]: I0216 11:32:11.002126 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lqt9s\" (UniqueName: \"kubernetes.io/projected/b9a6f88b-57b3-4f64-a4aa-84d0529fdf82-kube-api-access-lqt9s\") pod \"glance-default-external-api-0\" (UID: \"b9a6f88b-57b3-4f64-a4aa-84d0529fdf82\") " pod="openstack/glance-default-external-api-0" Feb 16 11:32:11 crc kubenswrapper[4949]: I0216 11:32:11.025900 4949 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Feb 16 11:32:11 crc kubenswrapper[4949]: I0216 11:32:11.025950 4949 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-c8fa8abd-35cd-473b-b870-e4bd2cd177cf\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c8fa8abd-35cd-473b-b870-e4bd2cd177cf\") pod \"glance-default-external-api-0\" (UID: \"b9a6f88b-57b3-4f64-a4aa-84d0529fdf82\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/459835082c7f1564eb79959c4b82a2b1818ffad6c5c0d3df291e5e1cf38dd0a7/globalmount\"" pod="openstack/glance-default-external-api-0" Feb 16 11:32:11 crc kubenswrapper[4949]: I0216 11:32:11.113654 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-8lkkq"] Feb 16 11:32:11 crc kubenswrapper[4949]: I0216 11:32:11.135814 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-g6wsf"] Feb 16 11:32:11 crc kubenswrapper[4949]: I0216 11:32:11.224045 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="e8932af0-5e7c-4715-bdfa-5b84382387d3" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 16 11:32:11 crc kubenswrapper[4949]: I0216 11:32:11.290669 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a5b65741-25f3-43db-a544-85997388cfea" path="/var/lib/kubelet/pods/a5b65741-25f3-43db-a544-85997388cfea/volumes" Feb 16 11:32:11 crc kubenswrapper[4949]: I0216 11:32:11.292272 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-a249-account-create-update-9lm5t"] Feb 16 11:32:11 crc kubenswrapper[4949]: I0216 11:32:11.299650 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-581e-account-create-update-hnv29"] Feb 16 11:32:11 crc kubenswrapper[4949]: I0216 11:32:11.566755 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-c8fa8abd-35cd-473b-b870-e4bd2cd177cf\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c8fa8abd-35cd-473b-b870-e4bd2cd177cf\") pod \"glance-default-external-api-0\" (UID: \"b9a6f88b-57b3-4f64-a4aa-84d0529fdf82\") " pod="openstack/glance-default-external-api-0" Feb 16 11:32:11 crc kubenswrapper[4949]: I0216 11:32:11.567687 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-7czt8" event={"ID":"c894b203-cec9-49e4-9b92-bdf185ad11fa","Type":"ContainerStarted","Data":"2bd6945801a3e675d9463627f1a464b5f974acf2788de57ced895020bc47a68d"} Feb 16 11:32:11 crc kubenswrapper[4949]: I0216 11:32:11.573018 4949 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-8lkkq" event={"ID":"ead190f2-2e52-47d7-a1d1-9d4f19c83e5d","Type":"ContainerStarted","Data":"82b7375203d9e82efd6ef0ef60508799a1e6a9f03bb4f22dab50b2e66dc399f8"} Feb 16 11:32:11 crc kubenswrapper[4949]: I0216 11:32:11.583067 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-a249-account-create-update-9lm5t" event={"ID":"d0913052-0fd5-45bf-a268-47dc86f7af86","Type":"ContainerStarted","Data":"6957fdbb8ff138197e69eacac4bbe155329e3318f62a23edd4009d82068dfa66"} Feb 16 11:32:11 crc kubenswrapper[4949]: I0216 11:32:11.627990 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-9jxjt" event={"ID":"064b7f6b-378a-4e2a-a3e9-1dd28e6b2a88","Type":"ContainerStarted","Data":"0710d8646b09c50a7dbd800d1acadd884e58e234ee8e7ac07d53107b4e3acb1c"} Feb 16 11:32:11 crc kubenswrapper[4949]: I0216 11:32:11.685880 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-581e-account-create-update-hnv29" event={"ID":"80642c6c-aa06-4c35-ab96-3c2114889ee5","Type":"ContainerStarted","Data":"9a3f807bd5986f5e326fff0e305f3d3cfdd692f212ff55b1ed0d99a645880b96"} Feb 16 11:32:11 crc kubenswrapper[4949]: I0216 11:32:11.691358 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Feb 16 11:32:11 crc kubenswrapper[4949]: I0216 11:32:11.745516 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g6wsf" event={"ID":"91104758-7e02-4761-bc39-fbca029cda0f","Type":"ContainerStarted","Data":"c402755e8d8403d1ea26da2b690414f83754823c8798ca2757b6ddd22ae9dbdc"} Feb 16 11:32:11 crc kubenswrapper[4949]: I0216 11:32:11.772232 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-575a-account-create-update-v2lxv" event={"ID":"2394da0b-9528-4d78-9fca-bf224b27ad5b","Type":"ContainerStarted","Data":"2712003b76f8abb1834ec51cbe36c4cf99da493caf9b3d6e36d310d2cbcebe06"} Feb 16 11:32:11 crc kubenswrapper[4949]: I0216 11:32:11.824750 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-575a-account-create-update-v2lxv" podStartSLOduration=3.824718736 podStartE2EDuration="3.824718736s" podCreationTimestamp="2026-02-16 11:32:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:32:11.797156079 +0000 UTC m=+1521.426490264" watchObservedRunningTime="2026-02-16 11:32:11.824718736 +0000 UTC m=+1521.454052901" Feb 16 11:32:11 crc kubenswrapper[4949]: I0216 11:32:11.903289 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-7b8694cbb9-kdjtx" Feb 16 11:32:11 crc kubenswrapper[4949]: I0216 11:32:11.965309 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Feb 16 11:32:12 crc kubenswrapper[4949]: I0216 11:32:12.003646 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-c9d48d96d-4b894"] Feb 16 11:32:12 crc kubenswrapper[4949]: I0216 11:32:12.003886 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-engine-c9d48d96d-4b894" podUID="90b83c30-3793-4e05-80d4-3a714ad09404" containerName="heat-engine" containerID="cri-o://65360a1c96f37c12ea19eb83787de3ae89e75a16081c24720bcd2c6e45ce6aab" gracePeriod=60 Feb 16 11:32:12 crc kubenswrapper[4949]: I0216 11:32:12.089561 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/495daabb-a227-4235-ace3-6caae6936da4-config-data\") pod \"495daabb-a227-4235-ace3-6caae6936da4\" (UID: \"495daabb-a227-4235-ace3-6caae6936da4\") " Feb 16 11:32:12 crc kubenswrapper[4949]: I0216 11:32:12.089781 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-27105715-b37b-455e-aa04-f095a035218a\") pod \"495daabb-a227-4235-ace3-6caae6936da4\" (UID: \"495daabb-a227-4235-ace3-6caae6936da4\") " Feb 16 11:32:12 crc kubenswrapper[4949]: I0216 11:32:12.089994 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/495daabb-a227-4235-ace3-6caae6936da4-combined-ca-bundle\") pod \"495daabb-a227-4235-ace3-6caae6936da4\" (UID: \"495daabb-a227-4235-ace3-6caae6936da4\") " Feb 16 11:32:12 crc kubenswrapper[4949]: I0216 11:32:12.090073 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/495daabb-a227-4235-ace3-6caae6936da4-internal-tls-certs\") pod \"495daabb-a227-4235-ace3-6caae6936da4\" (UID: \"495daabb-a227-4235-ace3-6caae6936da4\") " Feb 16 11:32:12 crc kubenswrapper[4949]: I0216 11:32:12.090141 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/495daabb-a227-4235-ace3-6caae6936da4-httpd-run\") pod \"495daabb-a227-4235-ace3-6caae6936da4\" (UID: \"495daabb-a227-4235-ace3-6caae6936da4\") " Feb 16 11:32:12 crc kubenswrapper[4949]: I0216 11:32:12.090188 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/495daabb-a227-4235-ace3-6caae6936da4-scripts\") pod \"495daabb-a227-4235-ace3-6caae6936da4\" (UID: \"495daabb-a227-4235-ace3-6caae6936da4\") " Feb 16 11:32:12 crc kubenswrapper[4949]: I0216 11:32:12.090346 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-45dz5\" (UniqueName: \"kubernetes.io/projected/495daabb-a227-4235-ace3-6caae6936da4-kube-api-access-45dz5\") pod \"495daabb-a227-4235-ace3-6caae6936da4\" (UID: \"495daabb-a227-4235-ace3-6caae6936da4\") " Feb 16 11:32:12 crc kubenswrapper[4949]: I0216 11:32:12.090370 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/495daabb-a227-4235-ace3-6caae6936da4-logs\") pod \"495daabb-a227-4235-ace3-6caae6936da4\" (UID: \"495daabb-a227-4235-ace3-6caae6936da4\") " Feb 16 11:32:12 crc kubenswrapper[4949]: I0216 11:32:12.092578 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/495daabb-a227-4235-ace3-6caae6936da4-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "495daabb-a227-4235-ace3-6caae6936da4" (UID: "495daabb-a227-4235-ace3-6caae6936da4"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:32:12 crc kubenswrapper[4949]: I0216 11:32:12.099373 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/495daabb-a227-4235-ace3-6caae6936da4-logs" (OuterVolumeSpecName: "logs") pod "495daabb-a227-4235-ace3-6caae6936da4" (UID: "495daabb-a227-4235-ace3-6caae6936da4"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:32:12 crc kubenswrapper[4949]: I0216 11:32:12.158720 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/495daabb-a227-4235-ace3-6caae6936da4-scripts" (OuterVolumeSpecName: "scripts") pod "495daabb-a227-4235-ace3-6caae6936da4" (UID: "495daabb-a227-4235-ace3-6caae6936da4"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:32:12 crc kubenswrapper[4949]: I0216 11:32:12.173241 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/495daabb-a227-4235-ace3-6caae6936da4-kube-api-access-45dz5" (OuterVolumeSpecName: "kube-api-access-45dz5") pod "495daabb-a227-4235-ace3-6caae6936da4" (UID: "495daabb-a227-4235-ace3-6caae6936da4"). InnerVolumeSpecName "kube-api-access-45dz5". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:32:12 crc kubenswrapper[4949]: I0216 11:32:12.193586 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-45dz5\" (UniqueName: \"kubernetes.io/projected/495daabb-a227-4235-ace3-6caae6936da4-kube-api-access-45dz5\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:12 crc kubenswrapper[4949]: I0216 11:32:12.193615 4949 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/495daabb-a227-4235-ace3-6caae6936da4-logs\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:12 crc kubenswrapper[4949]: I0216 11:32:12.193625 4949 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/495daabb-a227-4235-ace3-6caae6936da4-httpd-run\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:12 crc kubenswrapper[4949]: I0216 11:32:12.193635 4949 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/495daabb-a227-4235-ace3-6caae6936da4-scripts\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:12 crc kubenswrapper[4949]: I0216 11:32:12.808695 4949 generic.go:334] "Generic (PLEG): container finished" podID="c894b203-cec9-49e4-9b92-bdf185ad11fa" containerID="4b7d10972971dc5c3e73274936a9c4bc317a17e136c3159f6ea5903bc1b364a7" exitCode=0 Feb 16 11:32:12 crc kubenswrapper[4949]: I0216 11:32:12.808771 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-7czt8" event={"ID":"c894b203-cec9-49e4-9b92-bdf185ad11fa","Type":"ContainerDied","Data":"4b7d10972971dc5c3e73274936a9c4bc317a17e136c3159f6ea5903bc1b364a7"} Feb 16 11:32:12 crc kubenswrapper[4949]: I0216 11:32:12.816801 4949 generic.go:334] "Generic (PLEG): container finished" podID="064b7f6b-378a-4e2a-a3e9-1dd28e6b2a88" containerID="0710d8646b09c50a7dbd800d1acadd884e58e234ee8e7ac07d53107b4e3acb1c" exitCode=0 Feb 16 11:32:12 crc kubenswrapper[4949]: I0216 11:32:12.816879 4949 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openstack/nova-api-db-create-9jxjt" event={"ID":"064b7f6b-378a-4e2a-a3e9-1dd28e6b2a88","Type":"ContainerDied","Data":"0710d8646b09c50a7dbd800d1acadd884e58e234ee8e7ac07d53107b4e3acb1c"} Feb 16 11:32:12 crc kubenswrapper[4949]: I0216 11:32:12.827853 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"495daabb-a227-4235-ace3-6caae6936da4","Type":"ContainerDied","Data":"fb297072926614665cadf16825deea23583fd91df765c6b2ce82fa6b5dec5a36"} Feb 16 11:32:12 crc kubenswrapper[4949]: I0216 11:32:12.827924 4949 scope.go:117] "RemoveContainer" containerID="df915a5fb326ecb825acc3a5561fc8c91a49c4de269d1de2e19b6f78cd7f8925" Feb 16 11:32:12 crc kubenswrapper[4949]: I0216 11:32:12.828413 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Feb 16 11:32:12 crc kubenswrapper[4949]: I0216 11:32:12.858530 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-575a-account-create-update-v2lxv" event={"ID":"2394da0b-9528-4d78-9fca-bf224b27ad5b","Type":"ContainerStarted","Data":"bbbf95ad18f30db9efb7633d53d034fcb6ac39b560cf6f60e8dbf1a829f95181"} Feb 16 11:32:13 crc kubenswrapper[4949]: I0216 11:32:13.021866 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 16 11:32:13 crc kubenswrapper[4949]: I0216 11:32:13.037245 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/495daabb-a227-4235-ace3-6caae6936da4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "495daabb-a227-4235-ace3-6caae6936da4" (UID: "495daabb-a227-4235-ace3-6caae6936da4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:32:13 crc kubenswrapper[4949]: I0216 11:32:13.052249 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/495daabb-a227-4235-ace3-6caae6936da4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:13 crc kubenswrapper[4949]: I0216 11:32:13.083558 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-sf64k" Feb 16 11:32:13 crc kubenswrapper[4949]: I0216 11:32:13.084310 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-sf64k" Feb 16 11:32:13 crc kubenswrapper[4949]: I0216 11:32:13.273692 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-27105715-b37b-455e-aa04-f095a035218a" (OuterVolumeSpecName: "glance") pod "495daabb-a227-4235-ace3-6caae6936da4" (UID: "495daabb-a227-4235-ace3-6caae6936da4"). InnerVolumeSpecName "pvc-27105715-b37b-455e-aa04-f095a035218a". PluginName "kubernetes.io/csi", VolumeGidValue "" Feb 16 11:32:13 crc kubenswrapper[4949]: I0216 11:32:13.367010 4949 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-27105715-b37b-455e-aa04-f095a035218a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-27105715-b37b-455e-aa04-f095a035218a\") on node \"crc\" " Feb 16 11:32:13 crc kubenswrapper[4949]: I0216 11:32:13.473512 4949 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
Feb 16 11:32:13 crc kubenswrapper[4949]: I0216 11:32:13.473970 4949 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-27105715-b37b-455e-aa04-f095a035218a" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-27105715-b37b-455e-aa04-f095a035218a") on node "crc" Feb 16 11:32:13 crc kubenswrapper[4949]: I0216 11:32:13.473717 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/495daabb-a227-4235-ace3-6caae6936da4-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "495daabb-a227-4235-ace3-6caae6936da4" (UID: "495daabb-a227-4235-ace3-6caae6936da4"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:32:13 crc kubenswrapper[4949]: I0216 11:32:13.474568 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/495daabb-a227-4235-ace3-6caae6936da4-config-data" (OuterVolumeSpecName: "config-data") pod "495daabb-a227-4235-ace3-6caae6936da4" (UID: "495daabb-a227-4235-ace3-6caae6936da4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:32:13 crc kubenswrapper[4949]: I0216 11:32:13.574729 4949 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/495daabb-a227-4235-ace3-6caae6936da4-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:13 crc kubenswrapper[4949]: I0216 11:32:13.574774 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/495daabb-a227-4235-ace3-6caae6936da4-config-data\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:13 crc kubenswrapper[4949]: I0216 11:32:13.574788 4949 reconciler_common.go:293] "Volume detached for volume \"pvc-27105715-b37b-455e-aa04-f095a035218a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-27105715-b37b-455e-aa04-f095a035218a\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:13 crc kubenswrapper[4949]: I0216 11:32:13.764446 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-9jxjt" Feb 16 11:32:13 crc kubenswrapper[4949]: I0216 11:32:13.776380 4949 scope.go:117] "RemoveContainer" containerID="f3bf7647a6b8f333884c019d6fe52e91fe985607e23774e732324644cc5a338e" Feb 16 11:32:13 crc kubenswrapper[4949]: I0216 11:32:13.783240 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/064b7f6b-378a-4e2a-a3e9-1dd28e6b2a88-operator-scripts\") pod \"064b7f6b-378a-4e2a-a3e9-1dd28e6b2a88\" (UID: \"064b7f6b-378a-4e2a-a3e9-1dd28e6b2a88\") " Feb 16 11:32:13 crc kubenswrapper[4949]: I0216 11:32:13.783305 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-69sxz\" (UniqueName: \"kubernetes.io/projected/064b7f6b-378a-4e2a-a3e9-1dd28e6b2a88-kube-api-access-69sxz\") pod \"064b7f6b-378a-4e2a-a3e9-1dd28e6b2a88\" (UID: \"064b7f6b-378a-4e2a-a3e9-1dd28e6b2a88\") " Feb 16 11:32:13 crc kubenswrapper[4949]: I0216 11:32:13.784257 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/064b7f6b-378a-4e2a-a3e9-1dd28e6b2a88-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "064b7f6b-378a-4e2a-a3e9-1dd28e6b2a88" (UID: "064b7f6b-378a-4e2a-a3e9-1dd28e6b2a88"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:32:13 crc kubenswrapper[4949]: I0216 11:32:13.785249 4949 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/064b7f6b-378a-4e2a-a3e9-1dd28e6b2a88-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:13 crc kubenswrapper[4949]: I0216 11:32:13.813800 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/064b7f6b-378a-4e2a-a3e9-1dd28e6b2a88-kube-api-access-69sxz" (OuterVolumeSpecName: "kube-api-access-69sxz") pod "064b7f6b-378a-4e2a-a3e9-1dd28e6b2a88" (UID: "064b7f6b-378a-4e2a-a3e9-1dd28e6b2a88"). InnerVolumeSpecName "kube-api-access-69sxz". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:32:13 crc kubenswrapper[4949]: I0216 11:32:13.894699 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-69sxz\" (UniqueName: \"kubernetes.io/projected/064b7f6b-378a-4e2a-a3e9-1dd28e6b2a88-kube-api-access-69sxz\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:13 crc kubenswrapper[4949]: I0216 11:32:13.993600 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"b9a6f88b-57b3-4f64-a4aa-84d0529fdf82","Type":"ContainerStarted","Data":"6b09ffc0b4af5d1644549e5f0aacfa18294a7b335e72925a93a3836e16385bf5"} Feb 16 11:32:13 crc kubenswrapper[4949]: I0216 11:32:13.998385 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-581e-account-create-update-hnv29" event={"ID":"80642c6c-aa06-4c35-ab96-3c2114889ee5","Type":"ContainerStarted","Data":"ecd9cbd01aaea70701e6bff0939b9a10868831f1e0a00f0120c11b71f9653284"} Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.014047 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.027199 4949 generic.go:334] "Generic (PLEG): container finished" podID="91104758-7e02-4761-bc39-fbca029cda0f" containerID="cde13dd6b8d050db0e899c04f2440ddd270a3c82e8b009f5f7cebc5a293aa8dd" exitCode=0 Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.027751 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g6wsf" event={"ID":"91104758-7e02-4761-bc39-fbca029cda0f","Type":"ContainerDied","Data":"cde13dd6b8d050db0e899c04f2440ddd270a3c82e8b009f5f7cebc5a293aa8dd"} Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.043569 4949 generic.go:334] "Generic (PLEG): container finished" podID="2394da0b-9528-4d78-9fca-bf224b27ad5b" containerID="bbbf95ad18f30db9efb7633d53d034fcb6ac39b560cf6f60e8dbf1a829f95181" exitCode=0 Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.043749 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-575a-account-create-update-v2lxv" event={"ID":"2394da0b-9528-4d78-9fca-bf224b27ad5b","Type":"ContainerDied","Data":"bbbf95ad18f30db9efb7633d53d034fcb6ac39b560cf6f60e8dbf1a829f95181"} Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.048967 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-8lkkq" event={"ID":"ead190f2-2e52-47d7-a1d1-9d4f19c83e5d","Type":"ContainerStarted","Data":"43107383f85e8ca898ba418ec983e30012621aedda886ca3f7b3cda7d3ff5f04"} Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.068307 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-a249-account-create-update-9lm5t" 
event={"ID":"d0913052-0fd5-45bf-a268-47dc86f7af86","Type":"ContainerStarted","Data":"52daeb7bd25242f39a5e1bc367c4ffcc77b2cf612333d7a84f5d303f8e944ce9"} Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.094566 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-9jxjt" Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.095241 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-9jxjt" event={"ID":"064b7f6b-378a-4e2a-a3e9-1dd28e6b2a88","Type":"ContainerDied","Data":"3e6100b2a15e9237f1f89574d8931d9ae5825eb890973066b67067965cad1abc"} Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.095300 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3e6100b2a15e9237f1f89574d8931d9ae5825eb890973066b67067965cad1abc" Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.150663 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.178667 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 16 11:32:14 crc kubenswrapper[4949]: E0216 11:32:14.179495 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="064b7f6b-378a-4e2a-a3e9-1dd28e6b2a88" containerName="mariadb-database-create" Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.179514 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="064b7f6b-378a-4e2a-a3e9-1dd28e6b2a88" containerName="mariadb-database-create" Feb 16 11:32:14 crc kubenswrapper[4949]: E0216 11:32:14.179573 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="495daabb-a227-4235-ace3-6caae6936da4" containerName="glance-httpd" Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.179580 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="495daabb-a227-4235-ace3-6caae6936da4" containerName="glance-httpd" Feb 16 11:32:14 crc kubenswrapper[4949]: E0216 11:32:14.179605 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="495daabb-a227-4235-ace3-6caae6936da4" containerName="glance-log" Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.179611 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="495daabb-a227-4235-ace3-6caae6936da4" containerName="glance-log" Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.179846 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="064b7f6b-378a-4e2a-a3e9-1dd28e6b2a88" containerName="mariadb-database-create" Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.179867 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="495daabb-a227-4235-ace3-6caae6936da4" containerName="glance-log" Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.179904 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="495daabb-a227-4235-ace3-6caae6936da4" containerName="glance-httpd" Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.182261 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.188409 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.188733 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.221721 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.238695 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-581e-account-create-update-hnv29" podStartSLOduration=6.238664595 podStartE2EDuration="6.238664595s" podCreationTimestamp="2026-02-16 11:32:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:32:14.021806515 +0000 UTC m=+1523.651140680" watchObservedRunningTime="2026-02-16 11:32:14.238664595 +0000 UTC m=+1523.867998760" Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.262368 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-sf64k" podUID="9d6c2dd5-e12e-427c-a512-496546dcc309" containerName="registry-server" probeResult="failure" output=< Feb 16 11:32:14 crc kubenswrapper[4949]: timeout: failed to connect service ":50051" within 1s Feb 16 11:32:14 crc kubenswrapper[4949]: > Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.276411 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/394f965e-6650-4b88-91f3-b93a1bf0efa7-logs\") pod \"glance-default-internal-api-0\" (UID: \"394f965e-6650-4b88-91f3-b93a1bf0efa7\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.276471 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kgbvk\" (UniqueName: \"kubernetes.io/projected/394f965e-6650-4b88-91f3-b93a1bf0efa7-kube-api-access-kgbvk\") pod \"glance-default-internal-api-0\" (UID: \"394f965e-6650-4b88-91f3-b93a1bf0efa7\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.276581 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-27105715-b37b-455e-aa04-f095a035218a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-27105715-b37b-455e-aa04-f095a035218a\") pod \"glance-default-internal-api-0\" (UID: \"394f965e-6650-4b88-91f3-b93a1bf0efa7\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.276751 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/394f965e-6650-4b88-91f3-b93a1bf0efa7-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"394f965e-6650-4b88-91f3-b93a1bf0efa7\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.276792 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/394f965e-6650-4b88-91f3-b93a1bf0efa7-combined-ca-bundle\") pod 
\"glance-default-internal-api-0\" (UID: \"394f965e-6650-4b88-91f3-b93a1bf0efa7\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.276836 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/394f965e-6650-4b88-91f3-b93a1bf0efa7-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"394f965e-6650-4b88-91f3-b93a1bf0efa7\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.276895 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/394f965e-6650-4b88-91f3-b93a1bf0efa7-scripts\") pod \"glance-default-internal-api-0\" (UID: \"394f965e-6650-4b88-91f3-b93a1bf0efa7\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.277010 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/394f965e-6650-4b88-91f3-b93a1bf0efa7-config-data\") pod \"glance-default-internal-api-0\" (UID: \"394f965e-6650-4b88-91f3-b93a1bf0efa7\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.349962 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-a249-account-create-update-9lm5t" podStartSLOduration=6.349939511 podStartE2EDuration="6.349939511s" podCreationTimestamp="2026-02-16 11:32:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:32:14.165826496 +0000 UTC m=+1523.795160671" watchObservedRunningTime="2026-02-16 11:32:14.349939511 +0000 UTC m=+1523.979273676" Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.379605 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/394f965e-6650-4b88-91f3-b93a1bf0efa7-config-data\") pod \"glance-default-internal-api-0\" (UID: \"394f965e-6650-4b88-91f3-b93a1bf0efa7\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.380120 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/394f965e-6650-4b88-91f3-b93a1bf0efa7-logs\") pod \"glance-default-internal-api-0\" (UID: \"394f965e-6650-4b88-91f3-b93a1bf0efa7\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.380186 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kgbvk\" (UniqueName: \"kubernetes.io/projected/394f965e-6650-4b88-91f3-b93a1bf0efa7-kube-api-access-kgbvk\") pod \"glance-default-internal-api-0\" (UID: \"394f965e-6650-4b88-91f3-b93a1bf0efa7\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.380246 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-27105715-b37b-455e-aa04-f095a035218a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-27105715-b37b-455e-aa04-f095a035218a\") pod \"glance-default-internal-api-0\" (UID: \"394f965e-6650-4b88-91f3-b93a1bf0efa7\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:32:14 crc 
kubenswrapper[4949]: I0216 11:32:14.380378 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/394f965e-6650-4b88-91f3-b93a1bf0efa7-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"394f965e-6650-4b88-91f3-b93a1bf0efa7\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.380413 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/394f965e-6650-4b88-91f3-b93a1bf0efa7-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"394f965e-6650-4b88-91f3-b93a1bf0efa7\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.380460 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/394f965e-6650-4b88-91f3-b93a1bf0efa7-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"394f965e-6650-4b88-91f3-b93a1bf0efa7\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.380520 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/394f965e-6650-4b88-91f3-b93a1bf0efa7-scripts\") pod \"glance-default-internal-api-0\" (UID: \"394f965e-6650-4b88-91f3-b93a1bf0efa7\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.382839 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/394f965e-6650-4b88-91f3-b93a1bf0efa7-logs\") pod \"glance-default-internal-api-0\" (UID: \"394f965e-6650-4b88-91f3-b93a1bf0efa7\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.388908 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/394f965e-6650-4b88-91f3-b93a1bf0efa7-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"394f965e-6650-4b88-91f3-b93a1bf0efa7\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.393726 4949 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.393775 4949 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-27105715-b37b-455e-aa04-f095a035218a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-27105715-b37b-455e-aa04-f095a035218a\") pod \"glance-default-internal-api-0\" (UID: \"394f965e-6650-4b88-91f3-b93a1bf0efa7\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/eecfdadb463f5cbe330cd09c7447d0c2f21fd30b4fa967afbb7cd97dad6544d3/globalmount\"" pod="openstack/glance-default-internal-api-0" Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.412444 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/394f965e-6650-4b88-91f3-b93a1bf0efa7-scripts\") pod \"glance-default-internal-api-0\" (UID: \"394f965e-6650-4b88-91f3-b93a1bf0efa7\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.416852 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/394f965e-6650-4b88-91f3-b93a1bf0efa7-config-data\") pod \"glance-default-internal-api-0\" (UID: \"394f965e-6650-4b88-91f3-b93a1bf0efa7\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.433614 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/394f965e-6650-4b88-91f3-b93a1bf0efa7-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"394f965e-6650-4b88-91f3-b93a1bf0efa7\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.458470 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-cfnapi-566c9b565f-fv7vz" Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.464608 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kgbvk\" (UniqueName: \"kubernetes.io/projected/394f965e-6650-4b88-91f3-b93a1bf0efa7-kube-api-access-kgbvk\") pod \"glance-default-internal-api-0\" (UID: \"394f965e-6650-4b88-91f3-b93a1bf0efa7\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.469623 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/394f965e-6650-4b88-91f3-b93a1bf0efa7-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"394f965e-6650-4b88-91f3-b93a1bf0efa7\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.484292 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-api-5cdc74fffc-n2hcr" Feb 16 11:32:14 crc kubenswrapper[4949]: E0216 11:32:14.546714 4949 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="65360a1c96f37c12ea19eb83787de3ae89e75a16081c24720bcd2c6e45ce6aab" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Feb 16 11:32:14 crc kubenswrapper[4949]: E0216 11:32:14.609409 4949 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" 
containerID="65360a1c96f37c12ea19eb83787de3ae89e75a16081c24720bcd2c6e45ce6aab" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.609687 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-5489d8946b-q8mgt"] Feb 16 11:32:14 crc kubenswrapper[4949]: E0216 11:32:14.617844 4949 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="65360a1c96f37c12ea19eb83787de3ae89e75a16081c24720bcd2c6e45ce6aab" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Feb 16 11:32:14 crc kubenswrapper[4949]: E0216 11:32:14.617935 4949 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/heat-engine-c9d48d96d-4b894" podUID="90b83c30-3793-4e05-80d4-3a714ad09404" containerName="heat-engine" Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.679060 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-5d4bdcc77b-tmvps"] Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.679101 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-27105715-b37b-455e-aa04-f095a035218a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-27105715-b37b-455e-aa04-f095a035218a\") pod \"glance-default-internal-api-0\" (UID: \"394f965e-6650-4b88-91f3-b93a1bf0efa7\") " pod="openstack/glance-default-internal-api-0" Feb 16 11:32:14 crc kubenswrapper[4949]: I0216 11:32:14.829040 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Feb 16 11:32:15 crc kubenswrapper[4949]: I0216 11:32:15.126583 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-7czt8" Feb 16 11:32:15 crc kubenswrapper[4949]: I0216 11:32:15.180250 4949 generic.go:334] "Generic (PLEG): container finished" podID="d0913052-0fd5-45bf-a268-47dc86f7af86" containerID="52daeb7bd25242f39a5e1bc367c4ffcc77b2cf612333d7a84f5d303f8e944ce9" exitCode=0 Feb 16 11:32:15 crc kubenswrapper[4949]: I0216 11:32:15.180655 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-a249-account-create-update-9lm5t" event={"ID":"d0913052-0fd5-45bf-a268-47dc86f7af86","Type":"ContainerDied","Data":"52daeb7bd25242f39a5e1bc367c4ffcc77b2cf612333d7a84f5d303f8e944ce9"} Feb 16 11:32:15 crc kubenswrapper[4949]: I0216 11:32:15.209498 4949 generic.go:334] "Generic (PLEG): container finished" podID="80642c6c-aa06-4c35-ab96-3c2114889ee5" containerID="ecd9cbd01aaea70701e6bff0939b9a10868831f1e0a00f0120c11b71f9653284" exitCode=0 Feb 16 11:32:15 crc kubenswrapper[4949]: I0216 11:32:15.209712 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-581e-account-create-update-hnv29" event={"ID":"80642c6c-aa06-4c35-ab96-3c2114889ee5","Type":"ContainerDied","Data":"ecd9cbd01aaea70701e6bff0939b9a10868831f1e0a00f0120c11b71f9653284"} Feb 16 11:32:15 crc kubenswrapper[4949]: I0216 11:32:15.259775 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c894b203-cec9-49e4-9b92-bdf185ad11fa-operator-scripts\") pod \"c894b203-cec9-49e4-9b92-bdf185ad11fa\" (UID: \"c894b203-cec9-49e4-9b92-bdf185ad11fa\") " Feb 16 11:32:15 crc kubenswrapper[4949]: I0216 11:32:15.260199 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h548g\" (UniqueName: \"kubernetes.io/projected/c894b203-cec9-49e4-9b92-bdf185ad11fa-kube-api-access-h548g\") pod \"c894b203-cec9-49e4-9b92-bdf185ad11fa\" (UID: \"c894b203-cec9-49e4-9b92-bdf185ad11fa\") " Feb 16 11:32:15 crc kubenswrapper[4949]: I0216 11:32:15.260309 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c894b203-cec9-49e4-9b92-bdf185ad11fa-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c894b203-cec9-49e4-9b92-bdf185ad11fa" (UID: "c894b203-cec9-49e4-9b92-bdf185ad11fa"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:32:15 crc kubenswrapper[4949]: I0216 11:32:15.261222 4949 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c894b203-cec9-49e4-9b92-bdf185ad11fa-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:15 crc kubenswrapper[4949]: I0216 11:32:15.273400 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-7czt8" Feb 16 11:32:15 crc kubenswrapper[4949]: I0216 11:32:15.290350 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c894b203-cec9-49e4-9b92-bdf185ad11fa-kube-api-access-h548g" (OuterVolumeSpecName: "kube-api-access-h548g") pod "c894b203-cec9-49e4-9b92-bdf185ad11fa" (UID: "c894b203-cec9-49e4-9b92-bdf185ad11fa"). InnerVolumeSpecName "kube-api-access-h548g". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:32:15 crc kubenswrapper[4949]: I0216 11:32:15.294997 4949 generic.go:334] "Generic (PLEG): container finished" podID="ead190f2-2e52-47d7-a1d1-9d4f19c83e5d" containerID="43107383f85e8ca898ba418ec983e30012621aedda886ca3f7b3cda7d3ff5f04" exitCode=0 Feb 16 11:32:15 crc kubenswrapper[4949]: I0216 11:32:15.298531 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="495daabb-a227-4235-ace3-6caae6936da4" path="/var/lib/kubelet/pods/495daabb-a227-4235-ace3-6caae6936da4/volumes" Feb 16 11:32:15 crc kubenswrapper[4949]: I0216 11:32:15.299980 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-7czt8" event={"ID":"c894b203-cec9-49e4-9b92-bdf185ad11fa","Type":"ContainerDied","Data":"2bd6945801a3e675d9463627f1a464b5f974acf2788de57ced895020bc47a68d"} Feb 16 11:32:15 crc kubenswrapper[4949]: I0216 11:32:15.300018 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2bd6945801a3e675d9463627f1a464b5f974acf2788de57ced895020bc47a68d" Feb 16 11:32:15 crc kubenswrapper[4949]: I0216 11:32:15.300033 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-8lkkq" event={"ID":"ead190f2-2e52-47d7-a1d1-9d4f19c83e5d","Type":"ContainerDied","Data":"43107383f85e8ca898ba418ec983e30012621aedda886ca3f7b3cda7d3ff5f04"} Feb 16 11:32:15 crc kubenswrapper[4949]: I0216 11:32:15.372766 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h548g\" (UniqueName: \"kubernetes.io/projected/c894b203-cec9-49e4-9b92-bdf185ad11fa-kube-api-access-h548g\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:15 crc kubenswrapper[4949]: I0216 11:32:15.893952 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-5489d8946b-q8mgt" Feb 16 11:32:15 crc kubenswrapper[4949]: I0216 11:32:15.898901 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-5d4bdcc77b-tmvps" Feb 16 11:32:16 crc kubenswrapper[4949]: I0216 11:32:16.006222 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e4998389-d4f4-44f0-b048-988b96e27acc-config-data-custom\") pod \"e4998389-d4f4-44f0-b048-988b96e27acc\" (UID: \"e4998389-d4f4-44f0-b048-988b96e27acc\") " Feb 16 11:32:16 crc kubenswrapper[4949]: I0216 11:32:16.006345 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c53f95c1-224b-4026-9e51-9a2677621865-config-data-custom\") pod \"c53f95c1-224b-4026-9e51-9a2677621865\" (UID: \"c53f95c1-224b-4026-9e51-9a2677621865\") " Feb 16 11:32:16 crc kubenswrapper[4949]: I0216 11:32:16.006374 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4998389-d4f4-44f0-b048-988b96e27acc-combined-ca-bundle\") pod \"e4998389-d4f4-44f0-b048-988b96e27acc\" (UID: \"e4998389-d4f4-44f0-b048-988b96e27acc\") " Feb 16 11:32:16 crc kubenswrapper[4949]: I0216 11:32:16.006462 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xmsv8\" (UniqueName: \"kubernetes.io/projected/e4998389-d4f4-44f0-b048-988b96e27acc-kube-api-access-xmsv8\") pod \"e4998389-d4f4-44f0-b048-988b96e27acc\" (UID: \"e4998389-d4f4-44f0-b048-988b96e27acc\") " Feb 16 11:32:16 crc kubenswrapper[4949]: I0216 11:32:16.006495 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c53f95c1-224b-4026-9e51-9a2677621865-combined-ca-bundle\") pod \"c53f95c1-224b-4026-9e51-9a2677621865\" (UID: \"c53f95c1-224b-4026-9e51-9a2677621865\") " Feb 16 11:32:16 crc kubenswrapper[4949]: I0216 11:32:16.006550 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4998389-d4f4-44f0-b048-988b96e27acc-config-data\") pod \"e4998389-d4f4-44f0-b048-988b96e27acc\" (UID: \"e4998389-d4f4-44f0-b048-988b96e27acc\") " Feb 16 11:32:16 crc kubenswrapper[4949]: I0216 11:32:16.006704 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5wdms\" (UniqueName: \"kubernetes.io/projected/c53f95c1-224b-4026-9e51-9a2677621865-kube-api-access-5wdms\") pod \"c53f95c1-224b-4026-9e51-9a2677621865\" (UID: \"c53f95c1-224b-4026-9e51-9a2677621865\") " Feb 16 11:32:16 crc kubenswrapper[4949]: I0216 11:32:16.006724 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c53f95c1-224b-4026-9e51-9a2677621865-config-data\") pod \"c53f95c1-224b-4026-9e51-9a2677621865\" (UID: \"c53f95c1-224b-4026-9e51-9a2677621865\") " Feb 16 11:32:16 crc kubenswrapper[4949]: I0216 11:32:16.077362 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e4998389-d4f4-44f0-b048-988b96e27acc-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "e4998389-d4f4-44f0-b048-988b96e27acc" (UID: "e4998389-d4f4-44f0-b048-988b96e27acc"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:32:16 crc kubenswrapper[4949]: I0216 11:32:16.109477 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c53f95c1-224b-4026-9e51-9a2677621865-kube-api-access-5wdms" (OuterVolumeSpecName: "kube-api-access-5wdms") pod "c53f95c1-224b-4026-9e51-9a2677621865" (UID: "c53f95c1-224b-4026-9e51-9a2677621865"). InnerVolumeSpecName "kube-api-access-5wdms". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:32:16 crc kubenswrapper[4949]: I0216 11:32:16.111444 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5wdms\" (UniqueName: \"kubernetes.io/projected/c53f95c1-224b-4026-9e51-9a2677621865-kube-api-access-5wdms\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:16 crc kubenswrapper[4949]: I0216 11:32:16.111463 4949 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e4998389-d4f4-44f0-b048-988b96e27acc-config-data-custom\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:16 crc kubenswrapper[4949]: I0216 11:32:16.137879 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c53f95c1-224b-4026-9e51-9a2677621865-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "c53f95c1-224b-4026-9e51-9a2677621865" (UID: "c53f95c1-224b-4026-9e51-9a2677621865"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:32:16 crc kubenswrapper[4949]: I0216 11:32:16.143202 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e4998389-d4f4-44f0-b048-988b96e27acc-kube-api-access-xmsv8" (OuterVolumeSpecName: "kube-api-access-xmsv8") pod "e4998389-d4f4-44f0-b048-988b96e27acc" (UID: "e4998389-d4f4-44f0-b048-988b96e27acc"). InnerVolumeSpecName "kube-api-access-xmsv8". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:32:16 crc kubenswrapper[4949]: I0216 11:32:16.213829 4949 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c53f95c1-224b-4026-9e51-9a2677621865-config-data-custom\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:16 crc kubenswrapper[4949]: I0216 11:32:16.213878 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xmsv8\" (UniqueName: \"kubernetes.io/projected/e4998389-d4f4-44f0-b048-988b96e27acc-kube-api-access-xmsv8\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:16 crc kubenswrapper[4949]: I0216 11:32:16.247447 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e4998389-d4f4-44f0-b048-988b96e27acc-config-data" (OuterVolumeSpecName: "config-data") pod "e4998389-d4f4-44f0-b048-988b96e27acc" (UID: "e4998389-d4f4-44f0-b048-988b96e27acc"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:32:16 crc kubenswrapper[4949]: I0216 11:32:16.262769 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e4998389-d4f4-44f0-b048-988b96e27acc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e4998389-d4f4-44f0-b048-988b96e27acc" (UID: "e4998389-d4f4-44f0-b048-988b96e27acc"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:32:16 crc kubenswrapper[4949]: I0216 11:32:16.286582 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c53f95c1-224b-4026-9e51-9a2677621865-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c53f95c1-224b-4026-9e51-9a2677621865" (UID: "c53f95c1-224b-4026-9e51-9a2677621865"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:32:16 crc kubenswrapper[4949]: I0216 11:32:16.325554 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4998389-d4f4-44f0-b048-988b96e27acc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:16 crc kubenswrapper[4949]: I0216 11:32:16.325585 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c53f95c1-224b-4026-9e51-9a2677621865-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:16 crc kubenswrapper[4949]: I0216 11:32:16.325598 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4998389-d4f4-44f0-b048-988b96e27acc-config-data\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:16 crc kubenswrapper[4949]: I0216 11:32:16.373943 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5d4bdcc77b-tmvps" event={"ID":"c53f95c1-224b-4026-9e51-9a2677621865","Type":"ContainerDied","Data":"8ccbcbad02cbf5527c493251eeb13a65658a5ea6c64c69cfef5cad7b0f649092"} Feb 16 11:32:16 crc kubenswrapper[4949]: I0216 11:32:16.374002 4949 scope.go:117] "RemoveContainer" containerID="32b045667e36f74a7779b74a962feda63727f099cad34d3cb0b14940de390174" Feb 16 11:32:16 crc kubenswrapper[4949]: I0216 11:32:16.374121 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-5d4bdcc77b-tmvps" Feb 16 11:32:16 crc kubenswrapper[4949]: I0216 11:32:16.387398 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-5489d8946b-q8mgt" Feb 16 11:32:16 crc kubenswrapper[4949]: I0216 11:32:16.389274 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-5489d8946b-q8mgt" event={"ID":"e4998389-d4f4-44f0-b048-988b96e27acc","Type":"ContainerDied","Data":"a727c4d866f78125d4f3a29b3f1533841a9284c1167f972f49cfe7e34f518847"} Feb 16 11:32:16 crc kubenswrapper[4949]: I0216 11:32:16.408510 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c53f95c1-224b-4026-9e51-9a2677621865-config-data" (OuterVolumeSpecName: "config-data") pod "c53f95c1-224b-4026-9e51-9a2677621865" (UID: "c53f95c1-224b-4026-9e51-9a2677621865"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:32:16 crc kubenswrapper[4949]: I0216 11:32:16.444212 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c53f95c1-224b-4026-9e51-9a2677621865-config-data\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:16 crc kubenswrapper[4949]: I0216 11:32:16.534579 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-5489d8946b-q8mgt"] Feb 16 11:32:16 crc kubenswrapper[4949]: I0216 11:32:16.649464 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-5489d8946b-q8mgt"] Feb 16 11:32:16 crc kubenswrapper[4949]: I0216 11:32:16.672590 4949 scope.go:117] "RemoveContainer" containerID="d3fb4ce85ec36b9715e39d392f106a95a1d269d6347c50b0e741b83a54fa5188" Feb 16 11:32:16 crc kubenswrapper[4949]: I0216 11:32:16.819153 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 16 11:32:17 crc kubenswrapper[4949]: I0216 11:32:17.034292 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-8lkkq" Feb 16 11:32:17 crc kubenswrapper[4949]: I0216 11:32:17.065098 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-5d4bdcc77b-tmvps"] Feb 16 11:32:17 crc kubenswrapper[4949]: I0216 11:32:17.084927 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-575a-account-create-update-v2lxv" Feb 16 11:32:17 crc kubenswrapper[4949]: I0216 11:32:17.098263 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-5d4bdcc77b-tmvps"] Feb 16 11:32:17 crc kubenswrapper[4949]: I0216 11:32:17.119194 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ead190f2-2e52-47d7-a1d1-9d4f19c83e5d-operator-scripts\") pod \"ead190f2-2e52-47d7-a1d1-9d4f19c83e5d\" (UID: \"ead190f2-2e52-47d7-a1d1-9d4f19c83e5d\") " Feb 16 11:32:17 crc kubenswrapper[4949]: I0216 11:32:17.119370 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6gnc4\" (UniqueName: \"kubernetes.io/projected/2394da0b-9528-4d78-9fca-bf224b27ad5b-kube-api-access-6gnc4\") pod \"2394da0b-9528-4d78-9fca-bf224b27ad5b\" (UID: \"2394da0b-9528-4d78-9fca-bf224b27ad5b\") " Feb 16 11:32:17 crc kubenswrapper[4949]: I0216 11:32:17.119462 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2394da0b-9528-4d78-9fca-bf224b27ad5b-operator-scripts\") pod \"2394da0b-9528-4d78-9fca-bf224b27ad5b\" (UID: \"2394da0b-9528-4d78-9fca-bf224b27ad5b\") " Feb 16 11:32:17 crc kubenswrapper[4949]: I0216 11:32:17.119511 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6wl4j\" (UniqueName: \"kubernetes.io/projected/ead190f2-2e52-47d7-a1d1-9d4f19c83e5d-kube-api-access-6wl4j\") pod \"ead190f2-2e52-47d7-a1d1-9d4f19c83e5d\" (UID: \"ead190f2-2e52-47d7-a1d1-9d4f19c83e5d\") " Feb 16 11:32:17 crc kubenswrapper[4949]: I0216 11:32:17.121148 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2394da0b-9528-4d78-9fca-bf224b27ad5b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2394da0b-9528-4d78-9fca-bf224b27ad5b" (UID: "2394da0b-9528-4d78-9fca-bf224b27ad5b"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:32:17 crc kubenswrapper[4949]: I0216 11:32:17.124495 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ead190f2-2e52-47d7-a1d1-9d4f19c83e5d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ead190f2-2e52-47d7-a1d1-9d4f19c83e5d" (UID: "ead190f2-2e52-47d7-a1d1-9d4f19c83e5d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:32:17 crc kubenswrapper[4949]: I0216 11:32:17.130577 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ead190f2-2e52-47d7-a1d1-9d4f19c83e5d-kube-api-access-6wl4j" (OuterVolumeSpecName: "kube-api-access-6wl4j") pod "ead190f2-2e52-47d7-a1d1-9d4f19c83e5d" (UID: "ead190f2-2e52-47d7-a1d1-9d4f19c83e5d"). InnerVolumeSpecName "kube-api-access-6wl4j". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:32:17 crc kubenswrapper[4949]: I0216 11:32:17.131478 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2394da0b-9528-4d78-9fca-bf224b27ad5b-kube-api-access-6gnc4" (OuterVolumeSpecName: "kube-api-access-6gnc4") pod "2394da0b-9528-4d78-9fca-bf224b27ad5b" (UID: "2394da0b-9528-4d78-9fca-bf224b27ad5b"). InnerVolumeSpecName "kube-api-access-6gnc4". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:32:17 crc kubenswrapper[4949]: I0216 11:32:17.225358 4949 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ead190f2-2e52-47d7-a1d1-9d4f19c83e5d-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:17 crc kubenswrapper[4949]: I0216 11:32:17.225408 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6gnc4\" (UniqueName: \"kubernetes.io/projected/2394da0b-9528-4d78-9fca-bf224b27ad5b-kube-api-access-6gnc4\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:17 crc kubenswrapper[4949]: I0216 11:32:17.225418 4949 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2394da0b-9528-4d78-9fca-bf224b27ad5b-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:17 crc kubenswrapper[4949]: I0216 11:32:17.225427 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6wl4j\" (UniqueName: \"kubernetes.io/projected/ead190f2-2e52-47d7-a1d1-9d4f19c83e5d-kube-api-access-6wl4j\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:17 crc kubenswrapper[4949]: I0216 11:32:17.278229 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c53f95c1-224b-4026-9e51-9a2677621865" path="/var/lib/kubelet/pods/c53f95c1-224b-4026-9e51-9a2677621865/volumes" Feb 16 11:32:17 crc kubenswrapper[4949]: I0216 11:32:17.278833 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e4998389-d4f4-44f0-b048-988b96e27acc" path="/var/lib/kubelet/pods/e4998389-d4f4-44f0-b048-988b96e27acc/volumes" Feb 16 11:32:17 crc kubenswrapper[4949]: I0216 11:32:17.306730 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-a249-account-create-update-9lm5t" Feb 16 11:32:17 crc kubenswrapper[4949]: I0216 11:32:17.327737 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d0913052-0fd5-45bf-a268-47dc86f7af86-operator-scripts\") pod \"d0913052-0fd5-45bf-a268-47dc86f7af86\" (UID: \"d0913052-0fd5-45bf-a268-47dc86f7af86\") " Feb 16 11:32:17 crc kubenswrapper[4949]: I0216 11:32:17.327821 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pkrj8\" (UniqueName: \"kubernetes.io/projected/d0913052-0fd5-45bf-a268-47dc86f7af86-kube-api-access-pkrj8\") pod \"d0913052-0fd5-45bf-a268-47dc86f7af86\" (UID: \"d0913052-0fd5-45bf-a268-47dc86f7af86\") " Feb 16 11:32:17 crc kubenswrapper[4949]: I0216 11:32:17.328602 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d0913052-0fd5-45bf-a268-47dc86f7af86-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d0913052-0fd5-45bf-a268-47dc86f7af86" (UID: "d0913052-0fd5-45bf-a268-47dc86f7af86"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:32:17 crc kubenswrapper[4949]: I0216 11:32:17.329286 4949 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d0913052-0fd5-45bf-a268-47dc86f7af86-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:17 crc kubenswrapper[4949]: I0216 11:32:17.338123 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d0913052-0fd5-45bf-a268-47dc86f7af86-kube-api-access-pkrj8" (OuterVolumeSpecName: "kube-api-access-pkrj8") pod "d0913052-0fd5-45bf-a268-47dc86f7af86" (UID: "d0913052-0fd5-45bf-a268-47dc86f7af86"). InnerVolumeSpecName "kube-api-access-pkrj8". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:32:17 crc kubenswrapper[4949]: I0216 11:32:17.434329 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pkrj8\" (UniqueName: \"kubernetes.io/projected/d0913052-0fd5-45bf-a268-47dc86f7af86-kube-api-access-pkrj8\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:17 crc kubenswrapper[4949]: I0216 11:32:17.502667 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-581e-account-create-update-hnv29" Feb 16 11:32:17 crc kubenswrapper[4949]: I0216 11:32:17.521385 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-581e-account-create-update-hnv29" event={"ID":"80642c6c-aa06-4c35-ab96-3c2114889ee5","Type":"ContainerDied","Data":"9a3f807bd5986f5e326fff0e305f3d3cfdd692f212ff55b1ed0d99a645880b96"} Feb 16 11:32:17 crc kubenswrapper[4949]: I0216 11:32:17.521725 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9a3f807bd5986f5e326fff0e305f3d3cfdd692f212ff55b1ed0d99a645880b96" Feb 16 11:32:17 crc kubenswrapper[4949]: I0216 11:32:17.536800 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w5dd2\" (UniqueName: \"kubernetes.io/projected/80642c6c-aa06-4c35-ab96-3c2114889ee5-kube-api-access-w5dd2\") pod \"80642c6c-aa06-4c35-ab96-3c2114889ee5\" (UID: \"80642c6c-aa06-4c35-ab96-3c2114889ee5\") " Feb 16 11:32:17 crc kubenswrapper[4949]: I0216 11:32:17.536870 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/80642c6c-aa06-4c35-ab96-3c2114889ee5-operator-scripts\") pod \"80642c6c-aa06-4c35-ab96-3c2114889ee5\" (UID: \"80642c6c-aa06-4c35-ab96-3c2114889ee5\") " Feb 16 11:32:17 crc kubenswrapper[4949]: I0216 11:32:17.538512 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/80642c6c-aa06-4c35-ab96-3c2114889ee5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "80642c6c-aa06-4c35-ab96-3c2114889ee5" (UID: "80642c6c-aa06-4c35-ab96-3c2114889ee5"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:32:17 crc kubenswrapper[4949]: I0216 11:32:17.538538 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-8lkkq" event={"ID":"ead190f2-2e52-47d7-a1d1-9d4f19c83e5d","Type":"ContainerDied","Data":"82b7375203d9e82efd6ef0ef60508799a1e6a9f03bb4f22dab50b2e66dc399f8"} Feb 16 11:32:17 crc kubenswrapper[4949]: I0216 11:32:17.538579 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="82b7375203d9e82efd6ef0ef60508799a1e6a9f03bb4f22dab50b2e66dc399f8" Feb 16 11:32:17 crc kubenswrapper[4949]: I0216 11:32:17.538642 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-8lkkq" Feb 16 11:32:17 crc kubenswrapper[4949]: I0216 11:32:17.553105 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/80642c6c-aa06-4c35-ab96-3c2114889ee5-kube-api-access-w5dd2" (OuterVolumeSpecName: "kube-api-access-w5dd2") pod "80642c6c-aa06-4c35-ab96-3c2114889ee5" (UID: "80642c6c-aa06-4c35-ab96-3c2114889ee5"). InnerVolumeSpecName "kube-api-access-w5dd2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:32:17 crc kubenswrapper[4949]: I0216 11:32:17.593835 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-a249-account-create-update-9lm5t" event={"ID":"d0913052-0fd5-45bf-a268-47dc86f7af86","Type":"ContainerDied","Data":"6957fdbb8ff138197e69eacac4bbe155329e3318f62a23edd4009d82068dfa66"} Feb 16 11:32:17 crc kubenswrapper[4949]: I0216 11:32:17.593919 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6957fdbb8ff138197e69eacac4bbe155329e3318f62a23edd4009d82068dfa66" Feb 16 11:32:17 crc kubenswrapper[4949]: I0216 11:32:17.594066 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-a249-account-create-update-9lm5t" Feb 16 11:32:17 crc kubenswrapper[4949]: I0216 11:32:17.617129 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"b9a6f88b-57b3-4f64-a4aa-84d0529fdf82","Type":"ContainerStarted","Data":"002ba9df29cc4cbd23b825f58333128a605b98118de8c22351b119c022ebc879"} Feb 16 11:32:17 crc kubenswrapper[4949]: I0216 11:32:17.622784 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g6wsf" event={"ID":"91104758-7e02-4761-bc39-fbca029cda0f","Type":"ContainerStarted","Data":"3305dac371a44bb2843e37ca24be8aa1d61d0a9f4c278f19a3fedf34cef4c392"} Feb 16 11:32:17 crc kubenswrapper[4949]: I0216 11:32:17.639382 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-575a-account-create-update-v2lxv" event={"ID":"2394da0b-9528-4d78-9fca-bf224b27ad5b","Type":"ContainerDied","Data":"2712003b76f8abb1834ec51cbe36c4cf99da493caf9b3d6e36d310d2cbcebe06"} Feb 16 11:32:17 crc kubenswrapper[4949]: I0216 11:32:17.639430 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2712003b76f8abb1834ec51cbe36c4cf99da493caf9b3d6e36d310d2cbcebe06" Feb 16 11:32:17 crc kubenswrapper[4949]: I0216 11:32:17.639510 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-575a-account-create-update-v2lxv" Feb 16 11:32:17 crc kubenswrapper[4949]: I0216 11:32:17.644130 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w5dd2\" (UniqueName: \"kubernetes.io/projected/80642c6c-aa06-4c35-ab96-3c2114889ee5-kube-api-access-w5dd2\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:17 crc kubenswrapper[4949]: I0216 11:32:17.644188 4949 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/80642c6c-aa06-4c35-ab96-3c2114889ee5-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:17 crc kubenswrapper[4949]: I0216 11:32:17.646470 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"394f965e-6650-4b88-91f3-b93a1bf0efa7","Type":"ContainerStarted","Data":"f81f119ed00b752d908f0ef849be877d9bc4e5a5ec602bc7081d1a49c9892266"} Feb 16 11:32:18 crc kubenswrapper[4949]: I0216 11:32:18.690371 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-581e-account-create-update-hnv29" Feb 16 11:32:18 crc kubenswrapper[4949]: I0216 11:32:18.690395 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"394f965e-6650-4b88-91f3-b93a1bf0efa7","Type":"ContainerStarted","Data":"69c7720331b9a7d8ebafa9e75347ea8855b32f09f263287bad9738569d7c4435"} Feb 16 11:32:18 crc kubenswrapper[4949]: I0216 11:32:18.893380 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-rv8d4"] Feb 16 11:32:18 crc kubenswrapper[4949]: E0216 11:32:18.894037 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80642c6c-aa06-4c35-ab96-3c2114889ee5" containerName="mariadb-account-create-update" Feb 16 11:32:18 crc kubenswrapper[4949]: I0216 11:32:18.894054 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="80642c6c-aa06-4c35-ab96-3c2114889ee5" containerName="mariadb-account-create-update" Feb 16 11:32:18 crc kubenswrapper[4949]: E0216 11:32:18.894074 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4998389-d4f4-44f0-b048-988b96e27acc" containerName="heat-cfnapi" Feb 16 11:32:18 crc kubenswrapper[4949]: I0216 11:32:18.894080 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4998389-d4f4-44f0-b048-988b96e27acc" containerName="heat-cfnapi" Feb 16 11:32:18 crc kubenswrapper[4949]: E0216 11:32:18.894093 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c53f95c1-224b-4026-9e51-9a2677621865" containerName="heat-api" Feb 16 11:32:18 crc kubenswrapper[4949]: I0216 11:32:18.894099 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="c53f95c1-224b-4026-9e51-9a2677621865" containerName="heat-api" Feb 16 11:32:18 crc kubenswrapper[4949]: E0216 11:32:18.894111 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c53f95c1-224b-4026-9e51-9a2677621865" containerName="heat-api" Feb 16 11:32:18 crc kubenswrapper[4949]: I0216 11:32:18.894117 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="c53f95c1-224b-4026-9e51-9a2677621865" containerName="heat-api" Feb 16 11:32:18 crc kubenswrapper[4949]: E0216 11:32:18.894128 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2394da0b-9528-4d78-9fca-bf224b27ad5b" containerName="mariadb-account-create-update" Feb 16 11:32:18 crc kubenswrapper[4949]: I0216 11:32:18.894134 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="2394da0b-9528-4d78-9fca-bf224b27ad5b" containerName="mariadb-account-create-update" Feb 16 11:32:18 crc kubenswrapper[4949]: E0216 11:32:18.894155 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4998389-d4f4-44f0-b048-988b96e27acc" containerName="heat-cfnapi" Feb 16 11:32:18 crc kubenswrapper[4949]: I0216 11:32:18.894161 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4998389-d4f4-44f0-b048-988b96e27acc" containerName="heat-cfnapi" Feb 16 11:32:18 crc kubenswrapper[4949]: E0216 11:32:18.894194 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ead190f2-2e52-47d7-a1d1-9d4f19c83e5d" containerName="mariadb-database-create" Feb 16 11:32:18 crc kubenswrapper[4949]: I0216 11:32:18.894200 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="ead190f2-2e52-47d7-a1d1-9d4f19c83e5d" containerName="mariadb-database-create" Feb 16 11:32:18 crc kubenswrapper[4949]: E0216 11:32:18.894218 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0913052-0fd5-45bf-a268-47dc86f7af86" 
containerName="mariadb-account-create-update" Feb 16 11:32:18 crc kubenswrapper[4949]: I0216 11:32:18.894225 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0913052-0fd5-45bf-a268-47dc86f7af86" containerName="mariadb-account-create-update" Feb 16 11:32:18 crc kubenswrapper[4949]: E0216 11:32:18.894241 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c894b203-cec9-49e4-9b92-bdf185ad11fa" containerName="mariadb-database-create" Feb 16 11:32:18 crc kubenswrapper[4949]: I0216 11:32:18.894250 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="c894b203-cec9-49e4-9b92-bdf185ad11fa" containerName="mariadb-database-create" Feb 16 11:32:18 crc kubenswrapper[4949]: I0216 11:32:18.895029 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="c894b203-cec9-49e4-9b92-bdf185ad11fa" containerName="mariadb-database-create" Feb 16 11:32:18 crc kubenswrapper[4949]: I0216 11:32:18.895051 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0913052-0fd5-45bf-a268-47dc86f7af86" containerName="mariadb-account-create-update" Feb 16 11:32:18 crc kubenswrapper[4949]: I0216 11:32:18.895060 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="e4998389-d4f4-44f0-b048-988b96e27acc" containerName="heat-cfnapi" Feb 16 11:32:18 crc kubenswrapper[4949]: I0216 11:32:18.895078 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="ead190f2-2e52-47d7-a1d1-9d4f19c83e5d" containerName="mariadb-database-create" Feb 16 11:32:18 crc kubenswrapper[4949]: I0216 11:32:18.895089 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="80642c6c-aa06-4c35-ab96-3c2114889ee5" containerName="mariadb-account-create-update" Feb 16 11:32:18 crc kubenswrapper[4949]: I0216 11:32:18.895101 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="c53f95c1-224b-4026-9e51-9a2677621865" containerName="heat-api" Feb 16 11:32:18 crc kubenswrapper[4949]: I0216 11:32:18.895113 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="2394da0b-9528-4d78-9fca-bf224b27ad5b" containerName="mariadb-account-create-update" Feb 16 11:32:18 crc kubenswrapper[4949]: I0216 11:32:18.895121 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="c53f95c1-224b-4026-9e51-9a2677621865" containerName="heat-api" Feb 16 11:32:18 crc kubenswrapper[4949]: I0216 11:32:18.896130 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-rv8d4" Feb 16 11:32:18 crc kubenswrapper[4949]: I0216 11:32:18.900371 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Feb 16 11:32:18 crc kubenswrapper[4949]: I0216 11:32:18.900689 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Feb 16 11:32:18 crc kubenswrapper[4949]: I0216 11:32:18.900886 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-qjd76" Feb 16 11:32:18 crc kubenswrapper[4949]: I0216 11:32:18.923653 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-rv8d4"] Feb 16 11:32:19 crc kubenswrapper[4949]: I0216 11:32:19.023336 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8x8xn\" (UniqueName: \"kubernetes.io/projected/0794dfd5-3a8f-4e35-bb3e-2dc32881680d-kube-api-access-8x8xn\") pod \"nova-cell0-conductor-db-sync-rv8d4\" (UID: \"0794dfd5-3a8f-4e35-bb3e-2dc32881680d\") " pod="openstack/nova-cell0-conductor-db-sync-rv8d4" Feb 16 11:32:19 crc kubenswrapper[4949]: I0216 11:32:19.024036 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0794dfd5-3a8f-4e35-bb3e-2dc32881680d-config-data\") pod \"nova-cell0-conductor-db-sync-rv8d4\" (UID: \"0794dfd5-3a8f-4e35-bb3e-2dc32881680d\") " pod="openstack/nova-cell0-conductor-db-sync-rv8d4" Feb 16 11:32:19 crc kubenswrapper[4949]: I0216 11:32:19.024091 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0794dfd5-3a8f-4e35-bb3e-2dc32881680d-scripts\") pod \"nova-cell0-conductor-db-sync-rv8d4\" (UID: \"0794dfd5-3a8f-4e35-bb3e-2dc32881680d\") " pod="openstack/nova-cell0-conductor-db-sync-rv8d4" Feb 16 11:32:19 crc kubenswrapper[4949]: I0216 11:32:19.024257 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0794dfd5-3a8f-4e35-bb3e-2dc32881680d-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-rv8d4\" (UID: \"0794dfd5-3a8f-4e35-bb3e-2dc32881680d\") " pod="openstack/nova-cell0-conductor-db-sync-rv8d4" Feb 16 11:32:19 crc kubenswrapper[4949]: I0216 11:32:19.128620 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8x8xn\" (UniqueName: \"kubernetes.io/projected/0794dfd5-3a8f-4e35-bb3e-2dc32881680d-kube-api-access-8x8xn\") pod \"nova-cell0-conductor-db-sync-rv8d4\" (UID: \"0794dfd5-3a8f-4e35-bb3e-2dc32881680d\") " pod="openstack/nova-cell0-conductor-db-sync-rv8d4" Feb 16 11:32:19 crc kubenswrapper[4949]: I0216 11:32:19.128778 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0794dfd5-3a8f-4e35-bb3e-2dc32881680d-config-data\") pod \"nova-cell0-conductor-db-sync-rv8d4\" (UID: \"0794dfd5-3a8f-4e35-bb3e-2dc32881680d\") " pod="openstack/nova-cell0-conductor-db-sync-rv8d4" Feb 16 11:32:19 crc kubenswrapper[4949]: I0216 11:32:19.128819 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0794dfd5-3a8f-4e35-bb3e-2dc32881680d-scripts\") pod \"nova-cell0-conductor-db-sync-rv8d4\" (UID: 
\"0794dfd5-3a8f-4e35-bb3e-2dc32881680d\") " pod="openstack/nova-cell0-conductor-db-sync-rv8d4" Feb 16 11:32:19 crc kubenswrapper[4949]: I0216 11:32:19.128915 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0794dfd5-3a8f-4e35-bb3e-2dc32881680d-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-rv8d4\" (UID: \"0794dfd5-3a8f-4e35-bb3e-2dc32881680d\") " pod="openstack/nova-cell0-conductor-db-sync-rv8d4" Feb 16 11:32:19 crc kubenswrapper[4949]: I0216 11:32:19.135516 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0794dfd5-3a8f-4e35-bb3e-2dc32881680d-scripts\") pod \"nova-cell0-conductor-db-sync-rv8d4\" (UID: \"0794dfd5-3a8f-4e35-bb3e-2dc32881680d\") " pod="openstack/nova-cell0-conductor-db-sync-rv8d4" Feb 16 11:32:19 crc kubenswrapper[4949]: I0216 11:32:19.138300 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0794dfd5-3a8f-4e35-bb3e-2dc32881680d-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-rv8d4\" (UID: \"0794dfd5-3a8f-4e35-bb3e-2dc32881680d\") " pod="openstack/nova-cell0-conductor-db-sync-rv8d4" Feb 16 11:32:19 crc kubenswrapper[4949]: I0216 11:32:19.148018 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0794dfd5-3a8f-4e35-bb3e-2dc32881680d-config-data\") pod \"nova-cell0-conductor-db-sync-rv8d4\" (UID: \"0794dfd5-3a8f-4e35-bb3e-2dc32881680d\") " pod="openstack/nova-cell0-conductor-db-sync-rv8d4" Feb 16 11:32:19 crc kubenswrapper[4949]: I0216 11:32:19.148931 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8x8xn\" (UniqueName: \"kubernetes.io/projected/0794dfd5-3a8f-4e35-bb3e-2dc32881680d-kube-api-access-8x8xn\") pod \"nova-cell0-conductor-db-sync-rv8d4\" (UID: \"0794dfd5-3a8f-4e35-bb3e-2dc32881680d\") " pod="openstack/nova-cell0-conductor-db-sync-rv8d4" Feb 16 11:32:19 crc kubenswrapper[4949]: I0216 11:32:19.267983 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-rv8d4" Feb 16 11:32:19 crc kubenswrapper[4949]: I0216 11:32:19.755612 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"b9a6f88b-57b3-4f64-a4aa-84d0529fdf82","Type":"ContainerStarted","Data":"baf2feb4ed9ca7ccdd0954385019657eb338300c3f151dd6ac58846ce5d9a596"} Feb 16 11:32:20 crc kubenswrapper[4949]: I0216 11:32:20.464456 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=10.464423889 podStartE2EDuration="10.464423889s" podCreationTimestamp="2026-02-16 11:32:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:32:19.806623214 +0000 UTC m=+1529.435957379" watchObservedRunningTime="2026-02-16 11:32:20.464423889 +0000 UTC m=+1530.093758054" Feb 16 11:32:20 crc kubenswrapper[4949]: I0216 11:32:20.469232 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-rv8d4"] Feb 16 11:32:20 crc kubenswrapper[4949]: I0216 11:32:20.771773 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"394f965e-6650-4b88-91f3-b93a1bf0efa7","Type":"ContainerStarted","Data":"0a3ed3a0f5581a8ec6cfc528fa55d15de184ef3fc11eeb0b9f558bbe07bb82f1"} Feb 16 11:32:20 crc kubenswrapper[4949]: I0216 11:32:20.774436 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-rv8d4" event={"ID":"0794dfd5-3a8f-4e35-bb3e-2dc32881680d","Type":"ContainerStarted","Data":"def0b4871a984223bbeb5422cb7e7189ee521068235a7384de75f97e247dde34"} Feb 16 11:32:20 crc kubenswrapper[4949]: I0216 11:32:20.802958 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=7.80292818 podStartE2EDuration="7.80292818s" podCreationTimestamp="2026-02-16 11:32:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:32:20.794601733 +0000 UTC m=+1530.423935908" watchObservedRunningTime="2026-02-16 11:32:20.80292818 +0000 UTC m=+1530.432262345" Feb 16 11:32:21 crc kubenswrapper[4949]: I0216 11:32:21.692952 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Feb 16 11:32:21 crc kubenswrapper[4949]: I0216 11:32:21.693252 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Feb 16 11:32:21 crc kubenswrapper[4949]: I0216 11:32:21.765814 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Feb 16 11:32:21 crc kubenswrapper[4949]: I0216 11:32:21.778101 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Feb 16 11:32:21 crc kubenswrapper[4949]: I0216 11:32:21.816404 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Feb 16 11:32:21 crc kubenswrapper[4949]: I0216 11:32:21.816470 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Feb 16 11:32:22 crc kubenswrapper[4949]: I0216 11:32:22.628025 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-c9d48d96d-4b894" Feb 16 11:32:22 crc kubenswrapper[4949]: I0216 11:32:22.786197 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90b83c30-3793-4e05-80d4-3a714ad09404-config-data\") pod \"90b83c30-3793-4e05-80d4-3a714ad09404\" (UID: \"90b83c30-3793-4e05-80d4-3a714ad09404\") " Feb 16 11:32:22 crc kubenswrapper[4949]: I0216 11:32:22.786249 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90b83c30-3793-4e05-80d4-3a714ad09404-combined-ca-bundle\") pod \"90b83c30-3793-4e05-80d4-3a714ad09404\" (UID: \"90b83c30-3793-4e05-80d4-3a714ad09404\") " Feb 16 11:32:22 crc kubenswrapper[4949]: I0216 11:32:22.786327 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6w422\" (UniqueName: \"kubernetes.io/projected/90b83c30-3793-4e05-80d4-3a714ad09404-kube-api-access-6w422\") pod \"90b83c30-3793-4e05-80d4-3a714ad09404\" (UID: \"90b83c30-3793-4e05-80d4-3a714ad09404\") " Feb 16 11:32:22 crc kubenswrapper[4949]: I0216 11:32:22.786517 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/90b83c30-3793-4e05-80d4-3a714ad09404-config-data-custom\") pod \"90b83c30-3793-4e05-80d4-3a714ad09404\" (UID: \"90b83c30-3793-4e05-80d4-3a714ad09404\") " Feb 16 11:32:22 crc kubenswrapper[4949]: I0216 11:32:22.796115 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/90b83c30-3793-4e05-80d4-3a714ad09404-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "90b83c30-3793-4e05-80d4-3a714ad09404" (UID: "90b83c30-3793-4e05-80d4-3a714ad09404"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:32:22 crc kubenswrapper[4949]: I0216 11:32:22.796262 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/90b83c30-3793-4e05-80d4-3a714ad09404-kube-api-access-6w422" (OuterVolumeSpecName: "kube-api-access-6w422") pod "90b83c30-3793-4e05-80d4-3a714ad09404" (UID: "90b83c30-3793-4e05-80d4-3a714ad09404"). InnerVolumeSpecName "kube-api-access-6w422". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:32:22 crc kubenswrapper[4949]: I0216 11:32:22.828457 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/90b83c30-3793-4e05-80d4-3a714ad09404-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "90b83c30-3793-4e05-80d4-3a714ad09404" (UID: "90b83c30-3793-4e05-80d4-3a714ad09404"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:32:22 crc kubenswrapper[4949]: I0216 11:32:22.829396 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-c9d48d96d-4b894" Feb 16 11:32:22 crc kubenswrapper[4949]: I0216 11:32:22.829472 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-c9d48d96d-4b894" event={"ID":"90b83c30-3793-4e05-80d4-3a714ad09404","Type":"ContainerDied","Data":"65360a1c96f37c12ea19eb83787de3ae89e75a16081c24720bcd2c6e45ce6aab"} Feb 16 11:32:22 crc kubenswrapper[4949]: I0216 11:32:22.829565 4949 scope.go:117] "RemoveContainer" containerID="65360a1c96f37c12ea19eb83787de3ae89e75a16081c24720bcd2c6e45ce6aab" Feb 16 11:32:22 crc kubenswrapper[4949]: I0216 11:32:22.829293 4949 generic.go:334] "Generic (PLEG): container finished" podID="90b83c30-3793-4e05-80d4-3a714ad09404" containerID="65360a1c96f37c12ea19eb83787de3ae89e75a16081c24720bcd2c6e45ce6aab" exitCode=0 Feb 16 11:32:22 crc kubenswrapper[4949]: I0216 11:32:22.830334 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-c9d48d96d-4b894" event={"ID":"90b83c30-3793-4e05-80d4-3a714ad09404","Type":"ContainerDied","Data":"ea9aa67ca6d786c3ee39098ed21c5dd3ddfdb62d4fb96fb9892db9dde7fac11c"} Feb 16 11:32:22 crc kubenswrapper[4949]: I0216 11:32:22.840113 4949 generic.go:334] "Generic (PLEG): container finished" podID="91104758-7e02-4761-bc39-fbca029cda0f" containerID="3305dac371a44bb2843e37ca24be8aa1d61d0a9f4c278f19a3fedf34cef4c392" exitCode=0 Feb 16 11:32:22 crc kubenswrapper[4949]: I0216 11:32:22.845032 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g6wsf" event={"ID":"91104758-7e02-4761-bc39-fbca029cda0f","Type":"ContainerDied","Data":"3305dac371a44bb2843e37ca24be8aa1d61d0a9f4c278f19a3fedf34cef4c392"} Feb 16 11:32:22 crc kubenswrapper[4949]: I0216 11:32:22.870533 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/90b83c30-3793-4e05-80d4-3a714ad09404-config-data" (OuterVolumeSpecName: "config-data") pod "90b83c30-3793-4e05-80d4-3a714ad09404" (UID: "90b83c30-3793-4e05-80d4-3a714ad09404"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:32:22 crc kubenswrapper[4949]: I0216 11:32:22.903070 4949 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/90b83c30-3793-4e05-80d4-3a714ad09404-config-data-custom\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:22 crc kubenswrapper[4949]: I0216 11:32:22.905612 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90b83c30-3793-4e05-80d4-3a714ad09404-config-data\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:22 crc kubenswrapper[4949]: I0216 11:32:22.906148 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90b83c30-3793-4e05-80d4-3a714ad09404-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:22 crc kubenswrapper[4949]: I0216 11:32:22.906367 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6w422\" (UniqueName: \"kubernetes.io/projected/90b83c30-3793-4e05-80d4-3a714ad09404-kube-api-access-6w422\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:22 crc kubenswrapper[4949]: I0216 11:32:22.983457 4949 scope.go:117] "RemoveContainer" containerID="65360a1c96f37c12ea19eb83787de3ae89e75a16081c24720bcd2c6e45ce6aab" Feb 16 11:32:22 crc kubenswrapper[4949]: E0216 11:32:22.984596 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"65360a1c96f37c12ea19eb83787de3ae89e75a16081c24720bcd2c6e45ce6aab\": container with ID starting with 65360a1c96f37c12ea19eb83787de3ae89e75a16081c24720bcd2c6e45ce6aab not found: ID does not exist" containerID="65360a1c96f37c12ea19eb83787de3ae89e75a16081c24720bcd2c6e45ce6aab" Feb 16 11:32:22 crc kubenswrapper[4949]: I0216 11:32:22.984628 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"65360a1c96f37c12ea19eb83787de3ae89e75a16081c24720bcd2c6e45ce6aab"} err="failed to get container status \"65360a1c96f37c12ea19eb83787de3ae89e75a16081c24720bcd2c6e45ce6aab\": rpc error: code = NotFound desc = could not find container \"65360a1c96f37c12ea19eb83787de3ae89e75a16081c24720bcd2c6e45ce6aab\": container with ID starting with 65360a1c96f37c12ea19eb83787de3ae89e75a16081c24720bcd2c6e45ce6aab not found: ID does not exist" Feb 16 11:32:23 crc kubenswrapper[4949]: I0216 11:32:23.175981 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-sf64k" Feb 16 11:32:23 crc kubenswrapper[4949]: I0216 11:32:23.181345 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-c9d48d96d-4b894"] Feb 16 11:32:23 crc kubenswrapper[4949]: I0216 11:32:23.200514 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-engine-c9d48d96d-4b894"] Feb 16 11:32:23 crc kubenswrapper[4949]: I0216 11:32:23.253834 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="90b83c30-3793-4e05-80d4-3a714ad09404" path="/var/lib/kubelet/pods/90b83c30-3793-4e05-80d4-3a714ad09404/volumes" Feb 16 11:32:23 crc kubenswrapper[4949]: I0216 11:32:23.254748 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-sf64k" Feb 16 11:32:23 crc kubenswrapper[4949]: I0216 11:32:23.936311 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g6wsf" 
event={"ID":"91104758-7e02-4761-bc39-fbca029cda0f","Type":"ContainerStarted","Data":"d70527d358ac42dd45d165bd3ea5c55a79bf25df3e6f6c5570b6363e8501dea0"} Feb 16 11:32:23 crc kubenswrapper[4949]: I0216 11:32:23.972630 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-g6wsf" podStartSLOduration=6.697297455 podStartE2EDuration="15.972611519s" podCreationTimestamp="2026-02-16 11:32:08 +0000 UTC" firstStartedPulling="2026-02-16 11:32:14.089059385 +0000 UTC m=+1523.718393550" lastFinishedPulling="2026-02-16 11:32:23.364373449 +0000 UTC m=+1532.993707614" observedRunningTime="2026-02-16 11:32:23.971805966 +0000 UTC m=+1533.601140141" watchObservedRunningTime="2026-02-16 11:32:23.972611519 +0000 UTC m=+1533.601945684" Feb 16 11:32:24 crc kubenswrapper[4949]: I0216 11:32:24.120140 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-sf64k"] Feb 16 11:32:24 crc kubenswrapper[4949]: I0216 11:32:24.832822 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Feb 16 11:32:24 crc kubenswrapper[4949]: I0216 11:32:24.833099 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Feb 16 11:32:24 crc kubenswrapper[4949]: I0216 11:32:24.890680 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Feb 16 11:32:24 crc kubenswrapper[4949]: I0216 11:32:24.900949 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Feb 16 11:32:24 crc kubenswrapper[4949]: I0216 11:32:24.962491 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-sf64k" podUID="9d6c2dd5-e12e-427c-a512-496546dcc309" containerName="registry-server" containerID="cri-o://fe561fba57e47115c1190740c6167b2185a711055ff42c2feeea2259bfee7c29" gracePeriod=2 Feb 16 11:32:24 crc kubenswrapper[4949]: I0216 11:32:24.963348 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Feb 16 11:32:24 crc kubenswrapper[4949]: I0216 11:32:24.963928 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Feb 16 11:32:25 crc kubenswrapper[4949]: I0216 11:32:25.672293 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-sf64k" Feb 16 11:32:25 crc kubenswrapper[4949]: I0216 11:32:25.837941 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mlvdx\" (UniqueName: \"kubernetes.io/projected/9d6c2dd5-e12e-427c-a512-496546dcc309-kube-api-access-mlvdx\") pod \"9d6c2dd5-e12e-427c-a512-496546dcc309\" (UID: \"9d6c2dd5-e12e-427c-a512-496546dcc309\") " Feb 16 11:32:25 crc kubenswrapper[4949]: I0216 11:32:25.838022 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9d6c2dd5-e12e-427c-a512-496546dcc309-utilities\") pod \"9d6c2dd5-e12e-427c-a512-496546dcc309\" (UID: \"9d6c2dd5-e12e-427c-a512-496546dcc309\") " Feb 16 11:32:25 crc kubenswrapper[4949]: I0216 11:32:25.838214 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9d6c2dd5-e12e-427c-a512-496546dcc309-catalog-content\") pod \"9d6c2dd5-e12e-427c-a512-496546dcc309\" (UID: \"9d6c2dd5-e12e-427c-a512-496546dcc309\") " Feb 16 11:32:25 crc kubenswrapper[4949]: I0216 11:32:25.839052 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9d6c2dd5-e12e-427c-a512-496546dcc309-utilities" (OuterVolumeSpecName: "utilities") pod "9d6c2dd5-e12e-427c-a512-496546dcc309" (UID: "9d6c2dd5-e12e-427c-a512-496546dcc309"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:32:25 crc kubenswrapper[4949]: I0216 11:32:25.847274 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d6c2dd5-e12e-427c-a512-496546dcc309-kube-api-access-mlvdx" (OuterVolumeSpecName: "kube-api-access-mlvdx") pod "9d6c2dd5-e12e-427c-a512-496546dcc309" (UID: "9d6c2dd5-e12e-427c-a512-496546dcc309"). InnerVolumeSpecName "kube-api-access-mlvdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:32:25 crc kubenswrapper[4949]: I0216 11:32:25.900643 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9d6c2dd5-e12e-427c-a512-496546dcc309-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9d6c2dd5-e12e-427c-a512-496546dcc309" (UID: "9d6c2dd5-e12e-427c-a512-496546dcc309"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:32:25 crc kubenswrapper[4949]: I0216 11:32:25.942202 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mlvdx\" (UniqueName: \"kubernetes.io/projected/9d6c2dd5-e12e-427c-a512-496546dcc309-kube-api-access-mlvdx\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:25 crc kubenswrapper[4949]: I0216 11:32:25.942253 4949 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9d6c2dd5-e12e-427c-a512-496546dcc309-utilities\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:25 crc kubenswrapper[4949]: I0216 11:32:25.942267 4949 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9d6c2dd5-e12e-427c-a512-496546dcc309-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:25 crc kubenswrapper[4949]: I0216 11:32:25.985245 4949 generic.go:334] "Generic (PLEG): container finished" podID="9d6c2dd5-e12e-427c-a512-496546dcc309" containerID="fe561fba57e47115c1190740c6167b2185a711055ff42c2feeea2259bfee7c29" exitCode=0 Feb 16 11:32:25 crc kubenswrapper[4949]: I0216 11:32:25.985309 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sf64k" event={"ID":"9d6c2dd5-e12e-427c-a512-496546dcc309","Type":"ContainerDied","Data":"fe561fba57e47115c1190740c6167b2185a711055ff42c2feeea2259bfee7c29"} Feb 16 11:32:25 crc kubenswrapper[4949]: I0216 11:32:25.985340 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sf64k" Feb 16 11:32:25 crc kubenswrapper[4949]: I0216 11:32:25.985376 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sf64k" event={"ID":"9d6c2dd5-e12e-427c-a512-496546dcc309","Type":"ContainerDied","Data":"3ea23b8033f9c896fa9756b5106fc16d9390297354c4ac22190db49ee656de7e"} Feb 16 11:32:25 crc kubenswrapper[4949]: I0216 11:32:25.985417 4949 scope.go:117] "RemoveContainer" containerID="fe561fba57e47115c1190740c6167b2185a711055ff42c2feeea2259bfee7c29" Feb 16 11:32:26 crc kubenswrapper[4949]: I0216 11:32:26.026326 4949 scope.go:117] "RemoveContainer" containerID="b67ef0a92aaef3c32167d649bd039fd8202f797ee17cfa63afe58f721a8bb2f6" Feb 16 11:32:26 crc kubenswrapper[4949]: I0216 11:32:26.036309 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-sf64k"] Feb 16 11:32:26 crc kubenswrapper[4949]: I0216 11:32:26.094528 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-sf64k"] Feb 16 11:32:26 crc kubenswrapper[4949]: I0216 11:32:26.104201 4949 scope.go:117] "RemoveContainer" containerID="9e9b411057fa5df36bcba1a5fca2a9acab4e8e10294b089e32f06a10388330d9" Feb 16 11:32:26 crc kubenswrapper[4949]: I0216 11:32:26.277879 4949 scope.go:117] "RemoveContainer" containerID="fe561fba57e47115c1190740c6167b2185a711055ff42c2feeea2259bfee7c29" Feb 16 11:32:26 crc kubenswrapper[4949]: E0216 11:32:26.280570 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fe561fba57e47115c1190740c6167b2185a711055ff42c2feeea2259bfee7c29\": container with ID starting with fe561fba57e47115c1190740c6167b2185a711055ff42c2feeea2259bfee7c29 not found: ID does not exist" containerID="fe561fba57e47115c1190740c6167b2185a711055ff42c2feeea2259bfee7c29" Feb 16 11:32:26 crc kubenswrapper[4949]: I0216 11:32:26.280622 
4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe561fba57e47115c1190740c6167b2185a711055ff42c2feeea2259bfee7c29"} err="failed to get container status \"fe561fba57e47115c1190740c6167b2185a711055ff42c2feeea2259bfee7c29\": rpc error: code = NotFound desc = could not find container \"fe561fba57e47115c1190740c6167b2185a711055ff42c2feeea2259bfee7c29\": container with ID starting with fe561fba57e47115c1190740c6167b2185a711055ff42c2feeea2259bfee7c29 not found: ID does not exist" Feb 16 11:32:26 crc kubenswrapper[4949]: I0216 11:32:26.280656 4949 scope.go:117] "RemoveContainer" containerID="b67ef0a92aaef3c32167d649bd039fd8202f797ee17cfa63afe58f721a8bb2f6" Feb 16 11:32:26 crc kubenswrapper[4949]: E0216 11:32:26.280934 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b67ef0a92aaef3c32167d649bd039fd8202f797ee17cfa63afe58f721a8bb2f6\": container with ID starting with b67ef0a92aaef3c32167d649bd039fd8202f797ee17cfa63afe58f721a8bb2f6 not found: ID does not exist" containerID="b67ef0a92aaef3c32167d649bd039fd8202f797ee17cfa63afe58f721a8bb2f6" Feb 16 11:32:26 crc kubenswrapper[4949]: I0216 11:32:26.280958 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b67ef0a92aaef3c32167d649bd039fd8202f797ee17cfa63afe58f721a8bb2f6"} err="failed to get container status \"b67ef0a92aaef3c32167d649bd039fd8202f797ee17cfa63afe58f721a8bb2f6\": rpc error: code = NotFound desc = could not find container \"b67ef0a92aaef3c32167d649bd039fd8202f797ee17cfa63afe58f721a8bb2f6\": container with ID starting with b67ef0a92aaef3c32167d649bd039fd8202f797ee17cfa63afe58f721a8bb2f6 not found: ID does not exist" Feb 16 11:32:26 crc kubenswrapper[4949]: I0216 11:32:26.280972 4949 scope.go:117] "RemoveContainer" containerID="9e9b411057fa5df36bcba1a5fca2a9acab4e8e10294b089e32f06a10388330d9" Feb 16 11:32:26 crc kubenswrapper[4949]: E0216 11:32:26.281158 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9e9b411057fa5df36bcba1a5fca2a9acab4e8e10294b089e32f06a10388330d9\": container with ID starting with 9e9b411057fa5df36bcba1a5fca2a9acab4e8e10294b089e32f06a10388330d9 not found: ID does not exist" containerID="9e9b411057fa5df36bcba1a5fca2a9acab4e8e10294b089e32f06a10388330d9" Feb 16 11:32:26 crc kubenswrapper[4949]: I0216 11:32:26.281197 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9e9b411057fa5df36bcba1a5fca2a9acab4e8e10294b089e32f06a10388330d9"} err="failed to get container status \"9e9b411057fa5df36bcba1a5fca2a9acab4e8e10294b089e32f06a10388330d9\": rpc error: code = NotFound desc = could not find container \"9e9b411057fa5df36bcba1a5fca2a9acab4e8e10294b089e32f06a10388330d9\": container with ID starting with 9e9b411057fa5df36bcba1a5fca2a9acab4e8e10294b089e32f06a10388330d9 not found: ID does not exist" Feb 16 11:32:26 crc kubenswrapper[4949]: I0216 11:32:26.673454 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 16 11:32:26 crc kubenswrapper[4949]: I0216 11:32:26.764423 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e8932af0-5e7c-4715-bdfa-5b84382387d3-sg-core-conf-yaml\") pod \"e8932af0-5e7c-4715-bdfa-5b84382387d3\" (UID: \"e8932af0-5e7c-4715-bdfa-5b84382387d3\") " Feb 16 11:32:26 crc kubenswrapper[4949]: I0216 11:32:26.765545 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e8932af0-5e7c-4715-bdfa-5b84382387d3-run-httpd\") pod \"e8932af0-5e7c-4715-bdfa-5b84382387d3\" (UID: \"e8932af0-5e7c-4715-bdfa-5b84382387d3\") " Feb 16 11:32:26 crc kubenswrapper[4949]: I0216 11:32:26.765590 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8932af0-5e7c-4715-bdfa-5b84382387d3-combined-ca-bundle\") pod \"e8932af0-5e7c-4715-bdfa-5b84382387d3\" (UID: \"e8932af0-5e7c-4715-bdfa-5b84382387d3\") " Feb 16 11:32:26 crc kubenswrapper[4949]: I0216 11:32:26.765785 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8932af0-5e7c-4715-bdfa-5b84382387d3-config-data\") pod \"e8932af0-5e7c-4715-bdfa-5b84382387d3\" (UID: \"e8932af0-5e7c-4715-bdfa-5b84382387d3\") " Feb 16 11:32:26 crc kubenswrapper[4949]: I0216 11:32:26.765834 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e8932af0-5e7c-4715-bdfa-5b84382387d3-log-httpd\") pod \"e8932af0-5e7c-4715-bdfa-5b84382387d3\" (UID: \"e8932af0-5e7c-4715-bdfa-5b84382387d3\") " Feb 16 11:32:26 crc kubenswrapper[4949]: I0216 11:32:26.765857 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e8932af0-5e7c-4715-bdfa-5b84382387d3-scripts\") pod \"e8932af0-5e7c-4715-bdfa-5b84382387d3\" (UID: \"e8932af0-5e7c-4715-bdfa-5b84382387d3\") " Feb 16 11:32:26 crc kubenswrapper[4949]: I0216 11:32:26.765968 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zdlzp\" (UniqueName: \"kubernetes.io/projected/e8932af0-5e7c-4715-bdfa-5b84382387d3-kube-api-access-zdlzp\") pod \"e8932af0-5e7c-4715-bdfa-5b84382387d3\" (UID: \"e8932af0-5e7c-4715-bdfa-5b84382387d3\") " Feb 16 11:32:26 crc kubenswrapper[4949]: I0216 11:32:26.767663 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e8932af0-5e7c-4715-bdfa-5b84382387d3-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "e8932af0-5e7c-4715-bdfa-5b84382387d3" (UID: "e8932af0-5e7c-4715-bdfa-5b84382387d3"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:32:26 crc kubenswrapper[4949]: I0216 11:32:26.768113 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e8932af0-5e7c-4715-bdfa-5b84382387d3-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "e8932af0-5e7c-4715-bdfa-5b84382387d3" (UID: "e8932af0-5e7c-4715-bdfa-5b84382387d3"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:32:26 crc kubenswrapper[4949]: I0216 11:32:26.769881 4949 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e8932af0-5e7c-4715-bdfa-5b84382387d3-run-httpd\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:26 crc kubenswrapper[4949]: I0216 11:32:26.769910 4949 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e8932af0-5e7c-4715-bdfa-5b84382387d3-log-httpd\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:26 crc kubenswrapper[4949]: I0216 11:32:26.774280 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e8932af0-5e7c-4715-bdfa-5b84382387d3-kube-api-access-zdlzp" (OuterVolumeSpecName: "kube-api-access-zdlzp") pod "e8932af0-5e7c-4715-bdfa-5b84382387d3" (UID: "e8932af0-5e7c-4715-bdfa-5b84382387d3"). InnerVolumeSpecName "kube-api-access-zdlzp". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:32:26 crc kubenswrapper[4949]: I0216 11:32:26.806397 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8932af0-5e7c-4715-bdfa-5b84382387d3-scripts" (OuterVolumeSpecName: "scripts") pod "e8932af0-5e7c-4715-bdfa-5b84382387d3" (UID: "e8932af0-5e7c-4715-bdfa-5b84382387d3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:32:26 crc kubenswrapper[4949]: I0216 11:32:26.837111 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8932af0-5e7c-4715-bdfa-5b84382387d3-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "e8932af0-5e7c-4715-bdfa-5b84382387d3" (UID: "e8932af0-5e7c-4715-bdfa-5b84382387d3"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:32:26 crc kubenswrapper[4949]: I0216 11:32:26.872623 4949 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e8932af0-5e7c-4715-bdfa-5b84382387d3-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:26 crc kubenswrapper[4949]: I0216 11:32:26.872662 4949 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e8932af0-5e7c-4715-bdfa-5b84382387d3-scripts\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:26 crc kubenswrapper[4949]: I0216 11:32:26.872672 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zdlzp\" (UniqueName: \"kubernetes.io/projected/e8932af0-5e7c-4715-bdfa-5b84382387d3-kube-api-access-zdlzp\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:26 crc kubenswrapper[4949]: I0216 11:32:26.908654 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8932af0-5e7c-4715-bdfa-5b84382387d3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e8932af0-5e7c-4715-bdfa-5b84382387d3" (UID: "e8932af0-5e7c-4715-bdfa-5b84382387d3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:32:26 crc kubenswrapper[4949]: I0216 11:32:26.935080 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8932af0-5e7c-4715-bdfa-5b84382387d3-config-data" (OuterVolumeSpecName: "config-data") pod "e8932af0-5e7c-4715-bdfa-5b84382387d3" (UID: "e8932af0-5e7c-4715-bdfa-5b84382387d3"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:32:26 crc kubenswrapper[4949]: I0216 11:32:26.975533 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8932af0-5e7c-4715-bdfa-5b84382387d3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:26 crc kubenswrapper[4949]: I0216 11:32:26.975571 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8932af0-5e7c-4715-bdfa-5b84382387d3-config-data\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.007113 4949 generic.go:334] "Generic (PLEG): container finished" podID="e8932af0-5e7c-4715-bdfa-5b84382387d3" containerID="920daf415e560c1a54aa341701a7ea5943c37886c9cba278aee3a690764e4594" exitCode=137 Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.007158 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e8932af0-5e7c-4715-bdfa-5b84382387d3","Type":"ContainerDied","Data":"920daf415e560c1a54aa341701a7ea5943c37886c9cba278aee3a690764e4594"} Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.007207 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e8932af0-5e7c-4715-bdfa-5b84382387d3","Type":"ContainerDied","Data":"38722a9d34bd46e0c5a3dd45eb6410eb3144e7095733b31dbbc2a905b8e6a1ae"} Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.007229 4949 scope.go:117] "RemoveContainer" containerID="920daf415e560c1a54aa341701a7ea5943c37886c9cba278aee3a690764e4594" Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.007360 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.056588 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.139561 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.186308 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Feb 16 11:32:27 crc kubenswrapper[4949]: E0216 11:32:27.187029 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90b83c30-3793-4e05-80d4-3a714ad09404" containerName="heat-engine" Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.187054 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="90b83c30-3793-4e05-80d4-3a714ad09404" containerName="heat-engine" Feb 16 11:32:27 crc kubenswrapper[4949]: E0216 11:32:27.187067 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8932af0-5e7c-4715-bdfa-5b84382387d3" containerName="ceilometer-notification-agent" Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.187076 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8932af0-5e7c-4715-bdfa-5b84382387d3" containerName="ceilometer-notification-agent" Feb 16 11:32:27 crc kubenswrapper[4949]: E0216 11:32:27.187093 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d6c2dd5-e12e-427c-a512-496546dcc309" containerName="extract-content" Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.187104 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d6c2dd5-e12e-427c-a512-496546dcc309" containerName="extract-content" Feb 16 11:32:27 crc kubenswrapper[4949]: E0216 11:32:27.187114 4949 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="9d6c2dd5-e12e-427c-a512-496546dcc309" containerName="extract-utilities" Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.187123 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d6c2dd5-e12e-427c-a512-496546dcc309" containerName="extract-utilities" Feb 16 11:32:27 crc kubenswrapper[4949]: E0216 11:32:27.187158 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8932af0-5e7c-4715-bdfa-5b84382387d3" containerName="ceilometer-central-agent" Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.187165 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8932af0-5e7c-4715-bdfa-5b84382387d3" containerName="ceilometer-central-agent" Feb 16 11:32:27 crc kubenswrapper[4949]: E0216 11:32:27.187287 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8932af0-5e7c-4715-bdfa-5b84382387d3" containerName="proxy-httpd" Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.187296 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8932af0-5e7c-4715-bdfa-5b84382387d3" containerName="proxy-httpd" Feb 16 11:32:27 crc kubenswrapper[4949]: E0216 11:32:27.187319 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d6c2dd5-e12e-427c-a512-496546dcc309" containerName="registry-server" Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.187328 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d6c2dd5-e12e-427c-a512-496546dcc309" containerName="registry-server" Feb 16 11:32:27 crc kubenswrapper[4949]: E0216 11:32:27.187347 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8932af0-5e7c-4715-bdfa-5b84382387d3" containerName="sg-core" Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.187354 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8932af0-5e7c-4715-bdfa-5b84382387d3" containerName="sg-core" Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.187663 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8932af0-5e7c-4715-bdfa-5b84382387d3" containerName="ceilometer-notification-agent" Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.187681 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="90b83c30-3793-4e05-80d4-3a714ad09404" containerName="heat-engine" Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.187708 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8932af0-5e7c-4715-bdfa-5b84382387d3" containerName="ceilometer-central-agent" Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.187727 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="e4998389-d4f4-44f0-b048-988b96e27acc" containerName="heat-cfnapi" Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.187738 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d6c2dd5-e12e-427c-a512-496546dcc309" containerName="registry-server" Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.187750 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8932af0-5e7c-4715-bdfa-5b84382387d3" containerName="sg-core" Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.187764 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8932af0-5e7c-4715-bdfa-5b84382387d3" containerName="proxy-httpd" Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.191344 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.194052 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.198453 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.205218 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.259532 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d6c2dd5-e12e-427c-a512-496546dcc309" path="/var/lib/kubelet/pods/9d6c2dd5-e12e-427c-a512-496546dcc309/volumes" Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.265752 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e8932af0-5e7c-4715-bdfa-5b84382387d3" path="/var/lib/kubelet/pods/e8932af0-5e7c-4715-bdfa-5b84382387d3/volumes" Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.266931 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.266965 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.291035 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a4c12d9b-68b8-4a21-9dfc-d1fae847db6a-run-httpd\") pod \"ceilometer-0\" (UID: \"a4c12d9b-68b8-4a21-9dfc-d1fae847db6a\") " pod="openstack/ceilometer-0" Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.291353 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v5whd\" (UniqueName: \"kubernetes.io/projected/a4c12d9b-68b8-4a21-9dfc-d1fae847db6a-kube-api-access-v5whd\") pod \"ceilometer-0\" (UID: \"a4c12d9b-68b8-4a21-9dfc-d1fae847db6a\") " pod="openstack/ceilometer-0" Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.291380 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a4c12d9b-68b8-4a21-9dfc-d1fae847db6a-log-httpd\") pod \"ceilometer-0\" (UID: \"a4c12d9b-68b8-4a21-9dfc-d1fae847db6a\") " pod="openstack/ceilometer-0" Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.291426 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a4c12d9b-68b8-4a21-9dfc-d1fae847db6a-config-data\") pod \"ceilometer-0\" (UID: \"a4c12d9b-68b8-4a21-9dfc-d1fae847db6a\") " pod="openstack/ceilometer-0" Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.291531 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4c12d9b-68b8-4a21-9dfc-d1fae847db6a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a4c12d9b-68b8-4a21-9dfc-d1fae847db6a\") " pod="openstack/ceilometer-0" Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.291553 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a4c12d9b-68b8-4a21-9dfc-d1fae847db6a-sg-core-conf-yaml\") pod 
\"ceilometer-0\" (UID: \"a4c12d9b-68b8-4a21-9dfc-d1fae847db6a\") " pod="openstack/ceilometer-0" Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.291662 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a4c12d9b-68b8-4a21-9dfc-d1fae847db6a-scripts\") pod \"ceilometer-0\" (UID: \"a4c12d9b-68b8-4a21-9dfc-d1fae847db6a\") " pod="openstack/ceilometer-0" Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.393828 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v5whd\" (UniqueName: \"kubernetes.io/projected/a4c12d9b-68b8-4a21-9dfc-d1fae847db6a-kube-api-access-v5whd\") pod \"ceilometer-0\" (UID: \"a4c12d9b-68b8-4a21-9dfc-d1fae847db6a\") " pod="openstack/ceilometer-0" Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.393889 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a4c12d9b-68b8-4a21-9dfc-d1fae847db6a-log-httpd\") pod \"ceilometer-0\" (UID: \"a4c12d9b-68b8-4a21-9dfc-d1fae847db6a\") " pod="openstack/ceilometer-0" Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.393971 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a4c12d9b-68b8-4a21-9dfc-d1fae847db6a-config-data\") pod \"ceilometer-0\" (UID: \"a4c12d9b-68b8-4a21-9dfc-d1fae847db6a\") " pod="openstack/ceilometer-0" Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.394110 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4c12d9b-68b8-4a21-9dfc-d1fae847db6a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a4c12d9b-68b8-4a21-9dfc-d1fae847db6a\") " pod="openstack/ceilometer-0" Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.394134 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a4c12d9b-68b8-4a21-9dfc-d1fae847db6a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a4c12d9b-68b8-4a21-9dfc-d1fae847db6a\") " pod="openstack/ceilometer-0" Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.394314 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a4c12d9b-68b8-4a21-9dfc-d1fae847db6a-scripts\") pod \"ceilometer-0\" (UID: \"a4c12d9b-68b8-4a21-9dfc-d1fae847db6a\") " pod="openstack/ceilometer-0" Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.394400 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a4c12d9b-68b8-4a21-9dfc-d1fae847db6a-run-httpd\") pod \"ceilometer-0\" (UID: \"a4c12d9b-68b8-4a21-9dfc-d1fae847db6a\") " pod="openstack/ceilometer-0" Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.394979 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a4c12d9b-68b8-4a21-9dfc-d1fae847db6a-run-httpd\") pod \"ceilometer-0\" (UID: \"a4c12d9b-68b8-4a21-9dfc-d1fae847db6a\") " pod="openstack/ceilometer-0" Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.395608 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a4c12d9b-68b8-4a21-9dfc-d1fae847db6a-log-httpd\") pod \"ceilometer-0\" (UID: 
\"a4c12d9b-68b8-4a21-9dfc-d1fae847db6a\") " pod="openstack/ceilometer-0" Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.403536 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4c12d9b-68b8-4a21-9dfc-d1fae847db6a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a4c12d9b-68b8-4a21-9dfc-d1fae847db6a\") " pod="openstack/ceilometer-0" Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.403766 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a4c12d9b-68b8-4a21-9dfc-d1fae847db6a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a4c12d9b-68b8-4a21-9dfc-d1fae847db6a\") " pod="openstack/ceilometer-0" Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.404641 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a4c12d9b-68b8-4a21-9dfc-d1fae847db6a-config-data\") pod \"ceilometer-0\" (UID: \"a4c12d9b-68b8-4a21-9dfc-d1fae847db6a\") " pod="openstack/ceilometer-0" Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.413841 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v5whd\" (UniqueName: \"kubernetes.io/projected/a4c12d9b-68b8-4a21-9dfc-d1fae847db6a-kube-api-access-v5whd\") pod \"ceilometer-0\" (UID: \"a4c12d9b-68b8-4a21-9dfc-d1fae847db6a\") " pod="openstack/ceilometer-0" Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.419244 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a4c12d9b-68b8-4a21-9dfc-d1fae847db6a-scripts\") pod \"ceilometer-0\" (UID: \"a4c12d9b-68b8-4a21-9dfc-d1fae847db6a\") " pod="openstack/ceilometer-0" Feb 16 11:32:27 crc kubenswrapper[4949]: I0216 11:32:27.635864 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 16 11:32:28 crc kubenswrapper[4949]: I0216 11:32:28.779761 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Feb 16 11:32:28 crc kubenswrapper[4949]: I0216 11:32:28.780240 4949 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Feb 16 11:32:28 crc kubenswrapper[4949]: I0216 11:32:28.842777 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Feb 16 11:32:28 crc kubenswrapper[4949]: I0216 11:32:28.886458 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-g6wsf" Feb 16 11:32:28 crc kubenswrapper[4949]: I0216 11:32:28.888074 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-g6wsf" Feb 16 11:32:29 crc kubenswrapper[4949]: I0216 11:32:29.969690 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-g6wsf" podUID="91104758-7e02-4761-bc39-fbca029cda0f" containerName="registry-server" probeResult="failure" output=< Feb 16 11:32:29 crc kubenswrapper[4949]: timeout: failed to connect service ":50051" within 1s Feb 16 11:32:29 crc kubenswrapper[4949]: > Feb 16 11:32:34 crc kubenswrapper[4949]: I0216 11:32:34.604417 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 16 11:32:34 crc kubenswrapper[4949]: I0216 11:32:34.991511 4949 scope.go:117] "RemoveContainer" containerID="e37ba63457e430f59c62f0dd857c91021e6ab55bd140165e5e96537db87dbc32" Feb 16 11:32:35 crc kubenswrapper[4949]: I0216 11:32:35.101243 4949 scope.go:117] "RemoveContainer" containerID="404aa4f187586988a1d8116851358debcbe5512f1358f52befa7d86132c1771d" Feb 16 11:32:35 crc kubenswrapper[4949]: I0216 11:32:35.301295 4949 scope.go:117] "RemoveContainer" containerID="8243c747a7d69db63a1cf450201f51b1d98077e5b1913f35ef3963a977dce559" Feb 16 11:32:36 crc kubenswrapper[4949]: I0216 11:32:35.429068 4949 scope.go:117] "RemoveContainer" containerID="920daf415e560c1a54aa341701a7ea5943c37886c9cba278aee3a690764e4594" Feb 16 11:32:36 crc kubenswrapper[4949]: E0216 11:32:35.433424 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"920daf415e560c1a54aa341701a7ea5943c37886c9cba278aee3a690764e4594\": container with ID starting with 920daf415e560c1a54aa341701a7ea5943c37886c9cba278aee3a690764e4594 not found: ID does not exist" containerID="920daf415e560c1a54aa341701a7ea5943c37886c9cba278aee3a690764e4594" Feb 16 11:32:36 crc kubenswrapper[4949]: I0216 11:32:35.433485 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"920daf415e560c1a54aa341701a7ea5943c37886c9cba278aee3a690764e4594"} err="failed to get container status \"920daf415e560c1a54aa341701a7ea5943c37886c9cba278aee3a690764e4594\": rpc error: code = NotFound desc = could not find container \"920daf415e560c1a54aa341701a7ea5943c37886c9cba278aee3a690764e4594\": container with ID starting with 920daf415e560c1a54aa341701a7ea5943c37886c9cba278aee3a690764e4594 not found: ID does not exist" Feb 16 11:32:36 crc kubenswrapper[4949]: I0216 11:32:35.433707 4949 scope.go:117] "RemoveContainer" containerID="e37ba63457e430f59c62f0dd857c91021e6ab55bd140165e5e96537db87dbc32" Feb 16 11:32:36 crc kubenswrapper[4949]: E0216 11:32:35.443054 4949 log.go:32] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e37ba63457e430f59c62f0dd857c91021e6ab55bd140165e5e96537db87dbc32\": container with ID starting with e37ba63457e430f59c62f0dd857c91021e6ab55bd140165e5e96537db87dbc32 not found: ID does not exist" containerID="e37ba63457e430f59c62f0dd857c91021e6ab55bd140165e5e96537db87dbc32" Feb 16 11:32:36 crc kubenswrapper[4949]: I0216 11:32:35.443117 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e37ba63457e430f59c62f0dd857c91021e6ab55bd140165e5e96537db87dbc32"} err="failed to get container status \"e37ba63457e430f59c62f0dd857c91021e6ab55bd140165e5e96537db87dbc32\": rpc error: code = NotFound desc = could not find container \"e37ba63457e430f59c62f0dd857c91021e6ab55bd140165e5e96537db87dbc32\": container with ID starting with e37ba63457e430f59c62f0dd857c91021e6ab55bd140165e5e96537db87dbc32 not found: ID does not exist" Feb 16 11:32:36 crc kubenswrapper[4949]: I0216 11:32:35.443157 4949 scope.go:117] "RemoveContainer" containerID="404aa4f187586988a1d8116851358debcbe5512f1358f52befa7d86132c1771d" Feb 16 11:32:36 crc kubenswrapper[4949]: E0216 11:32:35.467470 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"404aa4f187586988a1d8116851358debcbe5512f1358f52befa7d86132c1771d\": container with ID starting with 404aa4f187586988a1d8116851358debcbe5512f1358f52befa7d86132c1771d not found: ID does not exist" containerID="404aa4f187586988a1d8116851358debcbe5512f1358f52befa7d86132c1771d" Feb 16 11:32:36 crc kubenswrapper[4949]: I0216 11:32:35.467518 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"404aa4f187586988a1d8116851358debcbe5512f1358f52befa7d86132c1771d"} err="failed to get container status \"404aa4f187586988a1d8116851358debcbe5512f1358f52befa7d86132c1771d\": rpc error: code = NotFound desc = could not find container \"404aa4f187586988a1d8116851358debcbe5512f1358f52befa7d86132c1771d\": container with ID starting with 404aa4f187586988a1d8116851358debcbe5512f1358f52befa7d86132c1771d not found: ID does not exist" Feb 16 11:32:36 crc kubenswrapper[4949]: I0216 11:32:35.467553 4949 scope.go:117] "RemoveContainer" containerID="8243c747a7d69db63a1cf450201f51b1d98077e5b1913f35ef3963a977dce559" Feb 16 11:32:36 crc kubenswrapper[4949]: E0216 11:32:35.492816 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8243c747a7d69db63a1cf450201f51b1d98077e5b1913f35ef3963a977dce559\": container with ID starting with 8243c747a7d69db63a1cf450201f51b1d98077e5b1913f35ef3963a977dce559 not found: ID does not exist" containerID="8243c747a7d69db63a1cf450201f51b1d98077e5b1913f35ef3963a977dce559" Feb 16 11:32:36 crc kubenswrapper[4949]: I0216 11:32:35.492860 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8243c747a7d69db63a1cf450201f51b1d98077e5b1913f35ef3963a977dce559"} err="failed to get container status \"8243c747a7d69db63a1cf450201f51b1d98077e5b1913f35ef3963a977dce559\": rpc error: code = NotFound desc = could not find container \"8243c747a7d69db63a1cf450201f51b1d98077e5b1913f35ef3963a977dce559\": container with ID starting with 8243c747a7d69db63a1cf450201f51b1d98077e5b1913f35ef3963a977dce559 not found: ID does not exist" Feb 16 11:32:36 crc kubenswrapper[4949]: I0216 11:32:35.739166 4949 kubelet.go:2437] "SyncLoop 
DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 16 11:32:36 crc kubenswrapper[4949]: W0216 11:32:35.747214 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda4c12d9b_68b8_4a21_9dfc_d1fae847db6a.slice/crio-a7cbfa8e4894c2f9e44f08f8964ad6e4dcbc823fb1ce59e7d0b53a0b8bed1c70 WatchSource:0}: Error finding container a7cbfa8e4894c2f9e44f08f8964ad6e4dcbc823fb1ce59e7d0b53a0b8bed1c70: Status 404 returned error can't find the container with id a7cbfa8e4894c2f9e44f08f8964ad6e4dcbc823fb1ce59e7d0b53a0b8bed1c70 Feb 16 11:32:36 crc kubenswrapper[4949]: I0216 11:32:36.205045 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a4c12d9b-68b8-4a21-9dfc-d1fae847db6a","Type":"ContainerStarted","Data":"a7cbfa8e4894c2f9e44f08f8964ad6e4dcbc823fb1ce59e7d0b53a0b8bed1c70"} Feb 16 11:32:36 crc kubenswrapper[4949]: I0216 11:32:36.207089 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-rv8d4" event={"ID":"0794dfd5-3a8f-4e35-bb3e-2dc32881680d","Type":"ContainerStarted","Data":"3c591b2c1c7369149a189d0b2cc914d3fc49e8cb2cac8eaa164a499c20cf9927"} Feb 16 11:32:36 crc kubenswrapper[4949]: I0216 11:32:36.236746 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-rv8d4" podStartSLOduration=3.612822235 podStartE2EDuration="18.236721369s" podCreationTimestamp="2026-02-16 11:32:18 +0000 UTC" firstStartedPulling="2026-02-16 11:32:20.490478552 +0000 UTC m=+1530.119812727" lastFinishedPulling="2026-02-16 11:32:35.114377696 +0000 UTC m=+1544.743711861" observedRunningTime="2026-02-16 11:32:36.221839134 +0000 UTC m=+1545.851173329" watchObservedRunningTime="2026-02-16 11:32:36.236721369 +0000 UTC m=+1545.866055534" Feb 16 11:32:37 crc kubenswrapper[4949]: I0216 11:32:37.224661 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a4c12d9b-68b8-4a21-9dfc-d1fae847db6a","Type":"ContainerStarted","Data":"f0ab89860541849f86fc65fb7aa4f7e00c2b1cd9b604a4c3bb585e966c63e0cf"} Feb 16 11:32:38 crc kubenswrapper[4949]: I0216 11:32:38.272620 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a4c12d9b-68b8-4a21-9dfc-d1fae847db6a","Type":"ContainerStarted","Data":"e83145291040aaa7412bca26e9ebbc0ef240228c0112d4485ab0b6a8218bdd43"} Feb 16 11:32:39 crc kubenswrapper[4949]: I0216 11:32:39.293499 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a4c12d9b-68b8-4a21-9dfc-d1fae847db6a","Type":"ContainerStarted","Data":"ce61347c30c9ad9427f64e0637c643f366bbbfb87c033122fb6afeb78aad6ec5"} Feb 16 11:32:39 crc kubenswrapper[4949]: I0216 11:32:39.945701 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-g6wsf" podUID="91104758-7e02-4761-bc39-fbca029cda0f" containerName="registry-server" probeResult="failure" output=< Feb 16 11:32:39 crc kubenswrapper[4949]: timeout: failed to connect service ":50051" within 1s Feb 16 11:32:39 crc kubenswrapper[4949]: > Feb 16 11:32:40 crc kubenswrapper[4949]: I0216 11:32:40.312460 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a4c12d9b-68b8-4a21-9dfc-d1fae847db6a","Type":"ContainerStarted","Data":"b11b280b282424897374c958f3926574db7995a402b9cf563744e1236b51989c"} Feb 16 11:32:40 crc kubenswrapper[4949]: I0216 11:32:40.312710 4949 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a4c12d9b-68b8-4a21-9dfc-d1fae847db6a" containerName="ceilometer-central-agent" containerID="cri-o://f0ab89860541849f86fc65fb7aa4f7e00c2b1cd9b604a4c3bb585e966c63e0cf" gracePeriod=30 Feb 16 11:32:40 crc kubenswrapper[4949]: I0216 11:32:40.313006 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Feb 16 11:32:40 crc kubenswrapper[4949]: I0216 11:32:40.313070 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a4c12d9b-68b8-4a21-9dfc-d1fae847db6a" containerName="proxy-httpd" containerID="cri-o://b11b280b282424897374c958f3926574db7995a402b9cf563744e1236b51989c" gracePeriod=30 Feb 16 11:32:40 crc kubenswrapper[4949]: I0216 11:32:40.313199 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a4c12d9b-68b8-4a21-9dfc-d1fae847db6a" containerName="sg-core" containerID="cri-o://ce61347c30c9ad9427f64e0637c643f366bbbfb87c033122fb6afeb78aad6ec5" gracePeriod=30 Feb 16 11:32:40 crc kubenswrapper[4949]: I0216 11:32:40.313259 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a4c12d9b-68b8-4a21-9dfc-d1fae847db6a" containerName="ceilometer-notification-agent" containerID="cri-o://e83145291040aaa7412bca26e9ebbc0ef240228c0112d4485ab0b6a8218bdd43" gracePeriod=30 Feb 16 11:32:40 crc kubenswrapper[4949]: I0216 11:32:40.376432 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=9.265050368 podStartE2EDuration="13.376405153s" podCreationTimestamp="2026-02-16 11:32:27 +0000 UTC" firstStartedPulling="2026-02-16 11:32:35.751729307 +0000 UTC m=+1545.381063482" lastFinishedPulling="2026-02-16 11:32:39.863084112 +0000 UTC m=+1549.492418267" observedRunningTime="2026-02-16 11:32:40.358055959 +0000 UTC m=+1549.987390124" watchObservedRunningTime="2026-02-16 11:32:40.376405153 +0000 UTC m=+1550.005739328" Feb 16 11:32:41 crc kubenswrapper[4949]: I0216 11:32:41.327183 4949 generic.go:334] "Generic (PLEG): container finished" podID="a4c12d9b-68b8-4a21-9dfc-d1fae847db6a" containerID="ce61347c30c9ad9427f64e0637c643f366bbbfb87c033122fb6afeb78aad6ec5" exitCode=2 Feb 16 11:32:41 crc kubenswrapper[4949]: I0216 11:32:41.327510 4949 generic.go:334] "Generic (PLEG): container finished" podID="a4c12d9b-68b8-4a21-9dfc-d1fae847db6a" containerID="e83145291040aaa7412bca26e9ebbc0ef240228c0112d4485ab0b6a8218bdd43" exitCode=0 Feb 16 11:32:41 crc kubenswrapper[4949]: I0216 11:32:41.327232 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a4c12d9b-68b8-4a21-9dfc-d1fae847db6a","Type":"ContainerDied","Data":"ce61347c30c9ad9427f64e0637c643f366bbbfb87c033122fb6afeb78aad6ec5"} Feb 16 11:32:41 crc kubenswrapper[4949]: I0216 11:32:41.327585 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a4c12d9b-68b8-4a21-9dfc-d1fae847db6a","Type":"ContainerDied","Data":"e83145291040aaa7412bca26e9ebbc0ef240228c0112d4485ab0b6a8218bdd43"} Feb 16 11:32:48 crc kubenswrapper[4949]: I0216 11:32:48.959854 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-g6wsf" Feb 16 11:32:49 crc kubenswrapper[4949]: I0216 11:32:49.064152 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-marketplace/community-operators-g6wsf" Feb 16 11:32:49 crc kubenswrapper[4949]: I0216 11:32:49.219024 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-g6wsf"] Feb 16 11:32:50 crc kubenswrapper[4949]: I0216 11:32:50.440161 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-g6wsf" podUID="91104758-7e02-4761-bc39-fbca029cda0f" containerName="registry-server" containerID="cri-o://d70527d358ac42dd45d165bd3ea5c55a79bf25df3e6f6c5570b6363e8501dea0" gracePeriod=2 Feb 16 11:32:51 crc kubenswrapper[4949]: I0216 11:32:51.029119 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-g6wsf" Feb 16 11:32:51 crc kubenswrapper[4949]: I0216 11:32:51.095880 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/91104758-7e02-4761-bc39-fbca029cda0f-catalog-content\") pod \"91104758-7e02-4761-bc39-fbca029cda0f\" (UID: \"91104758-7e02-4761-bc39-fbca029cda0f\") " Feb 16 11:32:51 crc kubenswrapper[4949]: I0216 11:32:51.095982 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/91104758-7e02-4761-bc39-fbca029cda0f-utilities\") pod \"91104758-7e02-4761-bc39-fbca029cda0f\" (UID: \"91104758-7e02-4761-bc39-fbca029cda0f\") " Feb 16 11:32:51 crc kubenswrapper[4949]: I0216 11:32:51.096067 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ntrkl\" (UniqueName: \"kubernetes.io/projected/91104758-7e02-4761-bc39-fbca029cda0f-kube-api-access-ntrkl\") pod \"91104758-7e02-4761-bc39-fbca029cda0f\" (UID: \"91104758-7e02-4761-bc39-fbca029cda0f\") " Feb 16 11:32:51 crc kubenswrapper[4949]: I0216 11:32:51.096905 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/91104758-7e02-4761-bc39-fbca029cda0f-utilities" (OuterVolumeSpecName: "utilities") pod "91104758-7e02-4761-bc39-fbca029cda0f" (UID: "91104758-7e02-4761-bc39-fbca029cda0f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:32:51 crc kubenswrapper[4949]: I0216 11:32:51.097461 4949 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/91104758-7e02-4761-bc39-fbca029cda0f-utilities\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:51 crc kubenswrapper[4949]: I0216 11:32:51.103457 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/91104758-7e02-4761-bc39-fbca029cda0f-kube-api-access-ntrkl" (OuterVolumeSpecName: "kube-api-access-ntrkl") pod "91104758-7e02-4761-bc39-fbca029cda0f" (UID: "91104758-7e02-4761-bc39-fbca029cda0f"). InnerVolumeSpecName "kube-api-access-ntrkl". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:32:51 crc kubenswrapper[4949]: I0216 11:32:51.167243 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/91104758-7e02-4761-bc39-fbca029cda0f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "91104758-7e02-4761-bc39-fbca029cda0f" (UID: "91104758-7e02-4761-bc39-fbca029cda0f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:32:51 crc kubenswrapper[4949]: I0216 11:32:51.199018 4949 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/91104758-7e02-4761-bc39-fbca029cda0f-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:51 crc kubenswrapper[4949]: I0216 11:32:51.199058 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ntrkl\" (UniqueName: \"kubernetes.io/projected/91104758-7e02-4761-bc39-fbca029cda0f-kube-api-access-ntrkl\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:51 crc kubenswrapper[4949]: I0216 11:32:51.457769 4949 generic.go:334] "Generic (PLEG): container finished" podID="a4c12d9b-68b8-4a21-9dfc-d1fae847db6a" containerID="f0ab89860541849f86fc65fb7aa4f7e00c2b1cd9b604a4c3bb585e966c63e0cf" exitCode=0 Feb 16 11:32:51 crc kubenswrapper[4949]: I0216 11:32:51.457880 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a4c12d9b-68b8-4a21-9dfc-d1fae847db6a","Type":"ContainerDied","Data":"f0ab89860541849f86fc65fb7aa4f7e00c2b1cd9b604a4c3bb585e966c63e0cf"} Feb 16 11:32:51 crc kubenswrapper[4949]: I0216 11:32:51.462546 4949 generic.go:334] "Generic (PLEG): container finished" podID="91104758-7e02-4761-bc39-fbca029cda0f" containerID="d70527d358ac42dd45d165bd3ea5c55a79bf25df3e6f6c5570b6363e8501dea0" exitCode=0 Feb 16 11:32:51 crc kubenswrapper[4949]: I0216 11:32:51.462613 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g6wsf" event={"ID":"91104758-7e02-4761-bc39-fbca029cda0f","Type":"ContainerDied","Data":"d70527d358ac42dd45d165bd3ea5c55a79bf25df3e6f6c5570b6363e8501dea0"} Feb 16 11:32:51 crc kubenswrapper[4949]: I0216 11:32:51.462674 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-g6wsf" Feb 16 11:32:51 crc kubenswrapper[4949]: I0216 11:32:51.462692 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g6wsf" event={"ID":"91104758-7e02-4761-bc39-fbca029cda0f","Type":"ContainerDied","Data":"c402755e8d8403d1ea26da2b690414f83754823c8798ca2757b6ddd22ae9dbdc"} Feb 16 11:32:51 crc kubenswrapper[4949]: I0216 11:32:51.462727 4949 scope.go:117] "RemoveContainer" containerID="d70527d358ac42dd45d165bd3ea5c55a79bf25df3e6f6c5570b6363e8501dea0" Feb 16 11:32:51 crc kubenswrapper[4949]: I0216 11:32:51.508350 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-g6wsf"] Feb 16 11:32:51 crc kubenswrapper[4949]: I0216 11:32:51.510693 4949 scope.go:117] "RemoveContainer" containerID="3305dac371a44bb2843e37ca24be8aa1d61d0a9f4c278f19a3fedf34cef4c392" Feb 16 11:32:51 crc kubenswrapper[4949]: I0216 11:32:51.522718 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-g6wsf"] Feb 16 11:32:51 crc kubenswrapper[4949]: I0216 11:32:51.537971 4949 scope.go:117] "RemoveContainer" containerID="cde13dd6b8d050db0e899c04f2440ddd270a3c82e8b009f5f7cebc5a293aa8dd" Feb 16 11:32:51 crc kubenswrapper[4949]: I0216 11:32:51.562677 4949 scope.go:117] "RemoveContainer" containerID="d70527d358ac42dd45d165bd3ea5c55a79bf25df3e6f6c5570b6363e8501dea0" Feb 16 11:32:51 crc kubenswrapper[4949]: E0216 11:32:51.563325 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d70527d358ac42dd45d165bd3ea5c55a79bf25df3e6f6c5570b6363e8501dea0\": container with ID starting with d70527d358ac42dd45d165bd3ea5c55a79bf25df3e6f6c5570b6363e8501dea0 not found: ID does not exist" containerID="d70527d358ac42dd45d165bd3ea5c55a79bf25df3e6f6c5570b6363e8501dea0" Feb 16 11:32:51 crc kubenswrapper[4949]: I0216 11:32:51.563374 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d70527d358ac42dd45d165bd3ea5c55a79bf25df3e6f6c5570b6363e8501dea0"} err="failed to get container status \"d70527d358ac42dd45d165bd3ea5c55a79bf25df3e6f6c5570b6363e8501dea0\": rpc error: code = NotFound desc = could not find container \"d70527d358ac42dd45d165bd3ea5c55a79bf25df3e6f6c5570b6363e8501dea0\": container with ID starting with d70527d358ac42dd45d165bd3ea5c55a79bf25df3e6f6c5570b6363e8501dea0 not found: ID does not exist" Feb 16 11:32:51 crc kubenswrapper[4949]: I0216 11:32:51.563410 4949 scope.go:117] "RemoveContainer" containerID="3305dac371a44bb2843e37ca24be8aa1d61d0a9f4c278f19a3fedf34cef4c392" Feb 16 11:32:51 crc kubenswrapper[4949]: E0216 11:32:51.563855 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3305dac371a44bb2843e37ca24be8aa1d61d0a9f4c278f19a3fedf34cef4c392\": container with ID starting with 3305dac371a44bb2843e37ca24be8aa1d61d0a9f4c278f19a3fedf34cef4c392 not found: ID does not exist" containerID="3305dac371a44bb2843e37ca24be8aa1d61d0a9f4c278f19a3fedf34cef4c392" Feb 16 11:32:51 crc kubenswrapper[4949]: I0216 11:32:51.563892 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3305dac371a44bb2843e37ca24be8aa1d61d0a9f4c278f19a3fedf34cef4c392"} err="failed to get container status \"3305dac371a44bb2843e37ca24be8aa1d61d0a9f4c278f19a3fedf34cef4c392\": rpc error: code = NotFound desc = could not find 
container \"3305dac371a44bb2843e37ca24be8aa1d61d0a9f4c278f19a3fedf34cef4c392\": container with ID starting with 3305dac371a44bb2843e37ca24be8aa1d61d0a9f4c278f19a3fedf34cef4c392 not found: ID does not exist" Feb 16 11:32:51 crc kubenswrapper[4949]: I0216 11:32:51.563914 4949 scope.go:117] "RemoveContainer" containerID="cde13dd6b8d050db0e899c04f2440ddd270a3c82e8b009f5f7cebc5a293aa8dd" Feb 16 11:32:51 crc kubenswrapper[4949]: E0216 11:32:51.564472 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cde13dd6b8d050db0e899c04f2440ddd270a3c82e8b009f5f7cebc5a293aa8dd\": container with ID starting with cde13dd6b8d050db0e899c04f2440ddd270a3c82e8b009f5f7cebc5a293aa8dd not found: ID does not exist" containerID="cde13dd6b8d050db0e899c04f2440ddd270a3c82e8b009f5f7cebc5a293aa8dd" Feb 16 11:32:51 crc kubenswrapper[4949]: I0216 11:32:51.564497 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cde13dd6b8d050db0e899c04f2440ddd270a3c82e8b009f5f7cebc5a293aa8dd"} err="failed to get container status \"cde13dd6b8d050db0e899c04f2440ddd270a3c82e8b009f5f7cebc5a293aa8dd\": rpc error: code = NotFound desc = could not find container \"cde13dd6b8d050db0e899c04f2440ddd270a3c82e8b009f5f7cebc5a293aa8dd\": container with ID starting with cde13dd6b8d050db0e899c04f2440ddd270a3c82e8b009f5f7cebc5a293aa8dd not found: ID does not exist" Feb 16 11:32:53 crc kubenswrapper[4949]: I0216 11:32:53.248357 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="91104758-7e02-4761-bc39-fbca029cda0f" path="/var/lib/kubelet/pods/91104758-7e02-4761-bc39-fbca029cda0f/volumes" Feb 16 11:32:54 crc kubenswrapper[4949]: I0216 11:32:54.497435 4949 generic.go:334] "Generic (PLEG): container finished" podID="0794dfd5-3a8f-4e35-bb3e-2dc32881680d" containerID="3c591b2c1c7369149a189d0b2cc914d3fc49e8cb2cac8eaa164a499c20cf9927" exitCode=0 Feb 16 11:32:54 crc kubenswrapper[4949]: I0216 11:32:54.497506 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-rv8d4" event={"ID":"0794dfd5-3a8f-4e35-bb3e-2dc32881680d","Type":"ContainerDied","Data":"3c591b2c1c7369149a189d0b2cc914d3fc49e8cb2cac8eaa164a499c20cf9927"} Feb 16 11:32:55 crc kubenswrapper[4949]: I0216 11:32:55.962146 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-rv8d4" Feb 16 11:32:56 crc kubenswrapper[4949]: I0216 11:32:56.025161 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0794dfd5-3a8f-4e35-bb3e-2dc32881680d-scripts\") pod \"0794dfd5-3a8f-4e35-bb3e-2dc32881680d\" (UID: \"0794dfd5-3a8f-4e35-bb3e-2dc32881680d\") " Feb 16 11:32:56 crc kubenswrapper[4949]: I0216 11:32:56.025798 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8x8xn\" (UniqueName: \"kubernetes.io/projected/0794dfd5-3a8f-4e35-bb3e-2dc32881680d-kube-api-access-8x8xn\") pod \"0794dfd5-3a8f-4e35-bb3e-2dc32881680d\" (UID: \"0794dfd5-3a8f-4e35-bb3e-2dc32881680d\") " Feb 16 11:32:56 crc kubenswrapper[4949]: I0216 11:32:56.025839 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0794dfd5-3a8f-4e35-bb3e-2dc32881680d-config-data\") pod \"0794dfd5-3a8f-4e35-bb3e-2dc32881680d\" (UID: \"0794dfd5-3a8f-4e35-bb3e-2dc32881680d\") " Feb 16 11:32:56 crc kubenswrapper[4949]: I0216 11:32:56.025872 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0794dfd5-3a8f-4e35-bb3e-2dc32881680d-combined-ca-bundle\") pod \"0794dfd5-3a8f-4e35-bb3e-2dc32881680d\" (UID: \"0794dfd5-3a8f-4e35-bb3e-2dc32881680d\") " Feb 16 11:32:56 crc kubenswrapper[4949]: I0216 11:32:56.033892 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0794dfd5-3a8f-4e35-bb3e-2dc32881680d-scripts" (OuterVolumeSpecName: "scripts") pod "0794dfd5-3a8f-4e35-bb3e-2dc32881680d" (UID: "0794dfd5-3a8f-4e35-bb3e-2dc32881680d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:32:56 crc kubenswrapper[4949]: I0216 11:32:56.035589 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0794dfd5-3a8f-4e35-bb3e-2dc32881680d-kube-api-access-8x8xn" (OuterVolumeSpecName: "kube-api-access-8x8xn") pod "0794dfd5-3a8f-4e35-bb3e-2dc32881680d" (UID: "0794dfd5-3a8f-4e35-bb3e-2dc32881680d"). InnerVolumeSpecName "kube-api-access-8x8xn". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:32:56 crc kubenswrapper[4949]: I0216 11:32:56.060459 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0794dfd5-3a8f-4e35-bb3e-2dc32881680d-config-data" (OuterVolumeSpecName: "config-data") pod "0794dfd5-3a8f-4e35-bb3e-2dc32881680d" (UID: "0794dfd5-3a8f-4e35-bb3e-2dc32881680d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:32:56 crc kubenswrapper[4949]: I0216 11:32:56.061435 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0794dfd5-3a8f-4e35-bb3e-2dc32881680d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0794dfd5-3a8f-4e35-bb3e-2dc32881680d" (UID: "0794dfd5-3a8f-4e35-bb3e-2dc32881680d"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:32:56 crc kubenswrapper[4949]: I0216 11:32:56.130236 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8x8xn\" (UniqueName: \"kubernetes.io/projected/0794dfd5-3a8f-4e35-bb3e-2dc32881680d-kube-api-access-8x8xn\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:56 crc kubenswrapper[4949]: I0216 11:32:56.130275 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0794dfd5-3a8f-4e35-bb3e-2dc32881680d-config-data\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:56 crc kubenswrapper[4949]: I0216 11:32:56.130287 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0794dfd5-3a8f-4e35-bb3e-2dc32881680d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:56 crc kubenswrapper[4949]: I0216 11:32:56.130295 4949 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0794dfd5-3a8f-4e35-bb3e-2dc32881680d-scripts\") on node \"crc\" DevicePath \"\"" Feb 16 11:32:56 crc kubenswrapper[4949]: I0216 11:32:56.530782 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-rv8d4" event={"ID":"0794dfd5-3a8f-4e35-bb3e-2dc32881680d","Type":"ContainerDied","Data":"def0b4871a984223bbeb5422cb7e7189ee521068235a7384de75f97e247dde34"} Feb 16 11:32:56 crc kubenswrapper[4949]: I0216 11:32:56.530843 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="def0b4871a984223bbeb5422cb7e7189ee521068235a7384de75f97e247dde34" Feb 16 11:32:56 crc kubenswrapper[4949]: I0216 11:32:56.530922 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-rv8d4" Feb 16 11:32:56 crc kubenswrapper[4949]: I0216 11:32:56.688879 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Feb 16 11:32:56 crc kubenswrapper[4949]: E0216 11:32:56.689500 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0794dfd5-3a8f-4e35-bb3e-2dc32881680d" containerName="nova-cell0-conductor-db-sync" Feb 16 11:32:56 crc kubenswrapper[4949]: I0216 11:32:56.689518 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="0794dfd5-3a8f-4e35-bb3e-2dc32881680d" containerName="nova-cell0-conductor-db-sync" Feb 16 11:32:56 crc kubenswrapper[4949]: E0216 11:32:56.689533 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91104758-7e02-4761-bc39-fbca029cda0f" containerName="extract-content" Feb 16 11:32:56 crc kubenswrapper[4949]: I0216 11:32:56.689542 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="91104758-7e02-4761-bc39-fbca029cda0f" containerName="extract-content" Feb 16 11:32:56 crc kubenswrapper[4949]: E0216 11:32:56.689555 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91104758-7e02-4761-bc39-fbca029cda0f" containerName="extract-utilities" Feb 16 11:32:56 crc kubenswrapper[4949]: I0216 11:32:56.689561 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="91104758-7e02-4761-bc39-fbca029cda0f" containerName="extract-utilities" Feb 16 11:32:56 crc kubenswrapper[4949]: E0216 11:32:56.689574 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91104758-7e02-4761-bc39-fbca029cda0f" containerName="registry-server" Feb 16 11:32:56 crc kubenswrapper[4949]: I0216 11:32:56.689580 4949 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="91104758-7e02-4761-bc39-fbca029cda0f" containerName="registry-server" Feb 16 11:32:56 crc kubenswrapper[4949]: I0216 11:32:56.689846 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="91104758-7e02-4761-bc39-fbca029cda0f" containerName="registry-server" Feb 16 11:32:56 crc kubenswrapper[4949]: I0216 11:32:56.689881 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="0794dfd5-3a8f-4e35-bb3e-2dc32881680d" containerName="nova-cell0-conductor-db-sync" Feb 16 11:32:56 crc kubenswrapper[4949]: I0216 11:32:56.690876 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Feb 16 11:32:56 crc kubenswrapper[4949]: I0216 11:32:56.693985 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Feb 16 11:32:56 crc kubenswrapper[4949]: I0216 11:32:56.695400 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-qjd76" Feb 16 11:32:56 crc kubenswrapper[4949]: I0216 11:32:56.712822 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Feb 16 11:32:56 crc kubenswrapper[4949]: I0216 11:32:56.847166 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mkjk9\" (UniqueName: \"kubernetes.io/projected/0711e67d-8056-4831-b1a8-1ff9e0399a76-kube-api-access-mkjk9\") pod \"nova-cell0-conductor-0\" (UID: \"0711e67d-8056-4831-b1a8-1ff9e0399a76\") " pod="openstack/nova-cell0-conductor-0" Feb 16 11:32:56 crc kubenswrapper[4949]: I0216 11:32:56.847369 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0711e67d-8056-4831-b1a8-1ff9e0399a76-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"0711e67d-8056-4831-b1a8-1ff9e0399a76\") " pod="openstack/nova-cell0-conductor-0" Feb 16 11:32:56 crc kubenswrapper[4949]: I0216 11:32:56.847769 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0711e67d-8056-4831-b1a8-1ff9e0399a76-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"0711e67d-8056-4831-b1a8-1ff9e0399a76\") " pod="openstack/nova-cell0-conductor-0" Feb 16 11:32:56 crc kubenswrapper[4949]: I0216 11:32:56.958241 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mkjk9\" (UniqueName: \"kubernetes.io/projected/0711e67d-8056-4831-b1a8-1ff9e0399a76-kube-api-access-mkjk9\") pod \"nova-cell0-conductor-0\" (UID: \"0711e67d-8056-4831-b1a8-1ff9e0399a76\") " pod="openstack/nova-cell0-conductor-0" Feb 16 11:32:56 crc kubenswrapper[4949]: I0216 11:32:56.958407 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0711e67d-8056-4831-b1a8-1ff9e0399a76-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"0711e67d-8056-4831-b1a8-1ff9e0399a76\") " pod="openstack/nova-cell0-conductor-0" Feb 16 11:32:56 crc kubenswrapper[4949]: I0216 11:32:56.958710 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0711e67d-8056-4831-b1a8-1ff9e0399a76-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"0711e67d-8056-4831-b1a8-1ff9e0399a76\") " pod="openstack/nova-cell0-conductor-0" Feb 16 11:32:56 crc 
kubenswrapper[4949]: I0216 11:32:56.962905 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0711e67d-8056-4831-b1a8-1ff9e0399a76-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"0711e67d-8056-4831-b1a8-1ff9e0399a76\") " pod="openstack/nova-cell0-conductor-0" Feb 16 11:32:56 crc kubenswrapper[4949]: I0216 11:32:56.963578 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0711e67d-8056-4831-b1a8-1ff9e0399a76-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"0711e67d-8056-4831-b1a8-1ff9e0399a76\") " pod="openstack/nova-cell0-conductor-0" Feb 16 11:32:56 crc kubenswrapper[4949]: I0216 11:32:56.976190 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mkjk9\" (UniqueName: \"kubernetes.io/projected/0711e67d-8056-4831-b1a8-1ff9e0399a76-kube-api-access-mkjk9\") pod \"nova-cell0-conductor-0\" (UID: \"0711e67d-8056-4831-b1a8-1ff9e0399a76\") " pod="openstack/nova-cell0-conductor-0" Feb 16 11:32:57 crc kubenswrapper[4949]: I0216 11:32:57.050636 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Feb 16 11:32:57 crc kubenswrapper[4949]: I0216 11:32:57.533261 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Feb 16 11:32:57 crc kubenswrapper[4949]: I0216 11:32:57.647907 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="a4c12d9b-68b8-4a21-9dfc-d1fae847db6a" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 16 11:32:58 crc kubenswrapper[4949]: I0216 11:32:58.557595 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"0711e67d-8056-4831-b1a8-1ff9e0399a76","Type":"ContainerStarted","Data":"476732bae8c5803a0d2de03432c940f633141fa4424a94d4ff94bb62b140ea5f"} Feb 16 11:32:58 crc kubenswrapper[4949]: I0216 11:32:58.557931 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Feb 16 11:32:58 crc kubenswrapper[4949]: I0216 11:32:58.557944 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"0711e67d-8056-4831-b1a8-1ff9e0399a76","Type":"ContainerStarted","Data":"5e4cfe062cedcd1c7956f6d0b44dd7467cf88545310916378ae1bb53c53c2119"} Feb 16 11:32:58 crc kubenswrapper[4949]: I0216 11:32:58.587421 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.587397333 podStartE2EDuration="2.587397333s" podCreationTimestamp="2026-02-16 11:32:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:32:58.577443978 +0000 UTC m=+1568.206778143" watchObservedRunningTime="2026-02-16 11:32:58.587397333 +0000 UTC m=+1568.216731498" Feb 16 11:33:02 crc kubenswrapper[4949]: I0216 11:33:02.088355 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Feb 16 11:33:02 crc kubenswrapper[4949]: I0216 11:33:02.960079 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-j9tzp"] Feb 16 11:33:02 crc kubenswrapper[4949]: I0216 11:33:02.963538 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-j9tzp" Feb 16 11:33:02 crc kubenswrapper[4949]: I0216 11:33:02.967621 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Feb 16 11:33:02 crc kubenswrapper[4949]: I0216 11:33:02.967708 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Feb 16 11:33:02 crc kubenswrapper[4949]: I0216 11:33:02.977782 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-j9tzp"] Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.032223 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n4cnh\" (UniqueName: \"kubernetes.io/projected/6a1e0048-b760-4b8c-a65b-7f0224833721-kube-api-access-n4cnh\") pod \"nova-cell0-cell-mapping-j9tzp\" (UID: \"6a1e0048-b760-4b8c-a65b-7f0224833721\") " pod="openstack/nova-cell0-cell-mapping-j9tzp" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.032301 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a1e0048-b760-4b8c-a65b-7f0224833721-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-j9tzp\" (UID: \"6a1e0048-b760-4b8c-a65b-7f0224833721\") " pod="openstack/nova-cell0-cell-mapping-j9tzp" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.032338 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6a1e0048-b760-4b8c-a65b-7f0224833721-config-data\") pod \"nova-cell0-cell-mapping-j9tzp\" (UID: \"6a1e0048-b760-4b8c-a65b-7f0224833721\") " pod="openstack/nova-cell0-cell-mapping-j9tzp" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.032553 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6a1e0048-b760-4b8c-a65b-7f0224833721-scripts\") pod \"nova-cell0-cell-mapping-j9tzp\" (UID: \"6a1e0048-b760-4b8c-a65b-7f0224833721\") " pod="openstack/nova-cell0-cell-mapping-j9tzp" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.134589 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6a1e0048-b760-4b8c-a65b-7f0224833721-scripts\") pod \"nova-cell0-cell-mapping-j9tzp\" (UID: \"6a1e0048-b760-4b8c-a65b-7f0224833721\") " pod="openstack/nova-cell0-cell-mapping-j9tzp" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.134675 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n4cnh\" (UniqueName: \"kubernetes.io/projected/6a1e0048-b760-4b8c-a65b-7f0224833721-kube-api-access-n4cnh\") pod \"nova-cell0-cell-mapping-j9tzp\" (UID: \"6a1e0048-b760-4b8c-a65b-7f0224833721\") " pod="openstack/nova-cell0-cell-mapping-j9tzp" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.134724 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a1e0048-b760-4b8c-a65b-7f0224833721-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-j9tzp\" (UID: \"6a1e0048-b760-4b8c-a65b-7f0224833721\") " pod="openstack/nova-cell0-cell-mapping-j9tzp" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.134762 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/6a1e0048-b760-4b8c-a65b-7f0224833721-config-data\") pod \"nova-cell0-cell-mapping-j9tzp\" (UID: \"6a1e0048-b760-4b8c-a65b-7f0224833721\") " pod="openstack/nova-cell0-cell-mapping-j9tzp" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.144920 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6a1e0048-b760-4b8c-a65b-7f0224833721-scripts\") pod \"nova-cell0-cell-mapping-j9tzp\" (UID: \"6a1e0048-b760-4b8c-a65b-7f0224833721\") " pod="openstack/nova-cell0-cell-mapping-j9tzp" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.157942 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a1e0048-b760-4b8c-a65b-7f0224833721-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-j9tzp\" (UID: \"6a1e0048-b760-4b8c-a65b-7f0224833721\") " pod="openstack/nova-cell0-cell-mapping-j9tzp" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.160075 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6a1e0048-b760-4b8c-a65b-7f0224833721-config-data\") pod \"nova-cell0-cell-mapping-j9tzp\" (UID: \"6a1e0048-b760-4b8c-a65b-7f0224833721\") " pod="openstack/nova-cell0-cell-mapping-j9tzp" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.161827 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n4cnh\" (UniqueName: \"kubernetes.io/projected/6a1e0048-b760-4b8c-a65b-7f0224833721-kube-api-access-n4cnh\") pod \"nova-cell0-cell-mapping-j9tzp\" (UID: \"6a1e0048-b760-4b8c-a65b-7f0224833721\") " pod="openstack/nova-cell0-cell-mapping-j9tzp" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.298969 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.299697 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-j9tzp" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.306094 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.323151 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.334260 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.355784 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9e673569-9ea0-48bd-8a23-d3766abebe4d-logs\") pod \"nova-api-0\" (UID: \"9e673569-9ea0-48bd-8a23-d3766abebe4d\") " pod="openstack/nova-api-0" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.356002 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9e673569-9ea0-48bd-8a23-d3766abebe4d-config-data\") pod \"nova-api-0\" (UID: \"9e673569-9ea0-48bd-8a23-d3766abebe4d\") " pod="openstack/nova-api-0" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.356049 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tfdtd\" (UniqueName: \"kubernetes.io/projected/9e673569-9ea0-48bd-8a23-d3766abebe4d-kube-api-access-tfdtd\") pod \"nova-api-0\" (UID: \"9e673569-9ea0-48bd-8a23-d3766abebe4d\") " pod="openstack/nova-api-0" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.356144 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e673569-9ea0-48bd-8a23-d3766abebe4d-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"9e673569-9ea0-48bd-8a23-d3766abebe4d\") " pod="openstack/nova-api-0" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.377219 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.379268 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.380016 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.458652 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9e673569-9ea0-48bd-8a23-d3766abebe4d-config-data\") pod \"nova-api-0\" (UID: \"9e673569-9ea0-48bd-8a23-d3766abebe4d\") " pod="openstack/nova-api-0" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.458704 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6f11b9b-6fb0-4b7d-b6bf-15340c23d530-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"b6f11b9b-6fb0-4b7d-b6bf-15340c23d530\") " pod="openstack/nova-scheduler-0" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.458743 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tfdtd\" (UniqueName: \"kubernetes.io/projected/9e673569-9ea0-48bd-8a23-d3766abebe4d-kube-api-access-tfdtd\") pod \"nova-api-0\" (UID: \"9e673569-9ea0-48bd-8a23-d3766abebe4d\") " pod="openstack/nova-api-0" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.458830 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e673569-9ea0-48bd-8a23-d3766abebe4d-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"9e673569-9ea0-48bd-8a23-d3766abebe4d\") " pod="openstack/nova-api-0" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.458928 4949 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9e673569-9ea0-48bd-8a23-d3766abebe4d-logs\") pod \"nova-api-0\" (UID: \"9e673569-9ea0-48bd-8a23-d3766abebe4d\") " pod="openstack/nova-api-0" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.458955 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6f11b9b-6fb0-4b7d-b6bf-15340c23d530-config-data\") pod \"nova-scheduler-0\" (UID: \"b6f11b9b-6fb0-4b7d-b6bf-15340c23d530\") " pod="openstack/nova-scheduler-0" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.459017 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kw58l\" (UniqueName: \"kubernetes.io/projected/b6f11b9b-6fb0-4b7d-b6bf-15340c23d530-kube-api-access-kw58l\") pod \"nova-scheduler-0\" (UID: \"b6f11b9b-6fb0-4b7d-b6bf-15340c23d530\") " pod="openstack/nova-scheduler-0" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.460399 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9e673569-9ea0-48bd-8a23-d3766abebe4d-logs\") pod \"nova-api-0\" (UID: \"9e673569-9ea0-48bd-8a23-d3766abebe4d\") " pod="openstack/nova-api-0" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.464251 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.471854 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9e673569-9ea0-48bd-8a23-d3766abebe4d-config-data\") pod \"nova-api-0\" (UID: \"9e673569-9ea0-48bd-8a23-d3766abebe4d\") " pod="openstack/nova-api-0" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.483666 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e673569-9ea0-48bd-8a23-d3766abebe4d-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"9e673569-9ea0-48bd-8a23-d3766abebe4d\") " pod="openstack/nova-api-0" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.505384 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tfdtd\" (UniqueName: \"kubernetes.io/projected/9e673569-9ea0-48bd-8a23-d3766abebe4d-kube-api-access-tfdtd\") pod \"nova-api-0\" (UID: \"9e673569-9ea0-48bd-8a23-d3766abebe4d\") " pod="openstack/nova-api-0" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.505882 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-create-sgsmx"] Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.507654 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-create-sgsmx" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.563265 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ddd1d576-d2cb-4f8b-aa99-9d31cd1d6f50-operator-scripts\") pod \"aodh-db-create-sgsmx\" (UID: \"ddd1d576-d2cb-4f8b-aa99-9d31cd1d6f50\") " pod="openstack/aodh-db-create-sgsmx" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.563370 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h4phn\" (UniqueName: \"kubernetes.io/projected/ddd1d576-d2cb-4f8b-aa99-9d31cd1d6f50-kube-api-access-h4phn\") pod \"aodh-db-create-sgsmx\" (UID: \"ddd1d576-d2cb-4f8b-aa99-9d31cd1d6f50\") " pod="openstack/aodh-db-create-sgsmx" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.563427 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6f11b9b-6fb0-4b7d-b6bf-15340c23d530-config-data\") pod \"nova-scheduler-0\" (UID: \"b6f11b9b-6fb0-4b7d-b6bf-15340c23d530\") " pod="openstack/nova-scheduler-0" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.563548 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kw58l\" (UniqueName: \"kubernetes.io/projected/b6f11b9b-6fb0-4b7d-b6bf-15340c23d530-kube-api-access-kw58l\") pod \"nova-scheduler-0\" (UID: \"b6f11b9b-6fb0-4b7d-b6bf-15340c23d530\") " pod="openstack/nova-scheduler-0" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.577393 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6f11b9b-6fb0-4b7d-b6bf-15340c23d530-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"b6f11b9b-6fb0-4b7d-b6bf-15340c23d530\") " pod="openstack/nova-scheduler-0" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.586719 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.589076 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.591037 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6f11b9b-6fb0-4b7d-b6bf-15340c23d530-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"b6f11b9b-6fb0-4b7d-b6bf-15340c23d530\") " pod="openstack/nova-scheduler-0" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.593457 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6f11b9b-6fb0-4b7d-b6bf-15340c23d530-config-data\") pod \"nova-scheduler-0\" (UID: \"b6f11b9b-6fb0-4b7d-b6bf-15340c23d530\") " pod="openstack/nova-scheduler-0" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.622437 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-bf0e-account-create-update-csrj2"] Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.626322 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-bf0e-account-create-update-csrj2" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.628528 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-db-secret" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.629336 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.634937 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.637252 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kw58l\" (UniqueName: \"kubernetes.io/projected/b6f11b9b-6fb0-4b7d-b6bf-15340c23d530-kube-api-access-kw58l\") pod \"nova-scheduler-0\" (UID: \"b6f11b9b-6fb0-4b7d-b6bf-15340c23d530\") " pod="openstack/nova-scheduler-0" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.663026 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-sgsmx"] Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.682310 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a95f751-76e3-4289-abf1-0328e14e6ac8-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"5a95f751-76e3-4289-abf1-0328e14e6ac8\") " pod="openstack/nova-cell1-novncproxy-0" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.693256 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ddd1d576-d2cb-4f8b-aa99-9d31cd1d6f50-operator-scripts\") pod \"aodh-db-create-sgsmx\" (UID: \"ddd1d576-d2cb-4f8b-aa99-9d31cd1d6f50\") " pod="openstack/aodh-db-create-sgsmx" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.693653 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h4phn\" (UniqueName: \"kubernetes.io/projected/ddd1d576-d2cb-4f8b-aa99-9d31cd1d6f50-kube-api-access-h4phn\") pod \"aodh-db-create-sgsmx\" (UID: \"ddd1d576-d2cb-4f8b-aa99-9d31cd1d6f50\") " pod="openstack/aodh-db-create-sgsmx" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.693844 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d70eafc1-c4e9-4f70-bb2c-0a4b7e54bef1-operator-scripts\") pod \"aodh-bf0e-account-create-update-csrj2\" (UID: \"d70eafc1-c4e9-4f70-bb2c-0a4b7e54bef1\") " pod="openstack/aodh-bf0e-account-create-update-csrj2" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.693935 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a95f751-76e3-4289-abf1-0328e14e6ac8-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"5a95f751-76e3-4289-abf1-0328e14e6ac8\") " pod="openstack/nova-cell1-novncproxy-0" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.694310 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-99c5p\" (UniqueName: \"kubernetes.io/projected/5a95f751-76e3-4289-abf1-0328e14e6ac8-kube-api-access-99c5p\") pod \"nova-cell1-novncproxy-0\" (UID: \"5a95f751-76e3-4289-abf1-0328e14e6ac8\") " pod="openstack/nova-cell1-novncproxy-0" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 
11:33:03.694480 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-77wvd\" (UniqueName: \"kubernetes.io/projected/d70eafc1-c4e9-4f70-bb2c-0a4b7e54bef1-kube-api-access-77wvd\") pod \"aodh-bf0e-account-create-update-csrj2\" (UID: \"d70eafc1-c4e9-4f70-bb2c-0a4b7e54bef1\") " pod="openstack/aodh-bf0e-account-create-update-csrj2" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.682926 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.696703 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ddd1d576-d2cb-4f8b-aa99-9d31cd1d6f50-operator-scripts\") pod \"aodh-db-create-sgsmx\" (UID: \"ddd1d576-d2cb-4f8b-aa99-9d31cd1d6f50\") " pod="openstack/aodh-db-create-sgsmx" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.732877 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h4phn\" (UniqueName: \"kubernetes.io/projected/ddd1d576-d2cb-4f8b-aa99-9d31cd1d6f50-kube-api-access-h4phn\") pod \"aodh-db-create-sgsmx\" (UID: \"ddd1d576-d2cb-4f8b-aa99-9d31cd1d6f50\") " pod="openstack/aodh-db-create-sgsmx" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.740146 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-bf0e-account-create-update-csrj2"] Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.746993 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-sgsmx" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.788508 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.798256 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d70eafc1-c4e9-4f70-bb2c-0a4b7e54bef1-operator-scripts\") pod \"aodh-bf0e-account-create-update-csrj2\" (UID: \"d70eafc1-c4e9-4f70-bb2c-0a4b7e54bef1\") " pod="openstack/aodh-bf0e-account-create-update-csrj2" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.798359 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a95f751-76e3-4289-abf1-0328e14e6ac8-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"5a95f751-76e3-4289-abf1-0328e14e6ac8\") " pod="openstack/nova-cell1-novncproxy-0" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.798620 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-99c5p\" (UniqueName: \"kubernetes.io/projected/5a95f751-76e3-4289-abf1-0328e14e6ac8-kube-api-access-99c5p\") pod \"nova-cell1-novncproxy-0\" (UID: \"5a95f751-76e3-4289-abf1-0328e14e6ac8\") " pod="openstack/nova-cell1-novncproxy-0" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.798662 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-77wvd\" (UniqueName: \"kubernetes.io/projected/d70eafc1-c4e9-4f70-bb2c-0a4b7e54bef1-kube-api-access-77wvd\") pod \"aodh-bf0e-account-create-update-csrj2\" (UID: \"d70eafc1-c4e9-4f70-bb2c-0a4b7e54bef1\") " pod="openstack/aodh-bf0e-account-create-update-csrj2" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.798701 4949 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a95f751-76e3-4289-abf1-0328e14e6ac8-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"5a95f751-76e3-4289-abf1-0328e14e6ac8\") " pod="openstack/nova-cell1-novncproxy-0" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.808378 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a95f751-76e3-4289-abf1-0328e14e6ac8-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"5a95f751-76e3-4289-abf1-0328e14e6ac8\") " pod="openstack/nova-cell1-novncproxy-0" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.810804 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d70eafc1-c4e9-4f70-bb2c-0a4b7e54bef1-operator-scripts\") pod \"aodh-bf0e-account-create-update-csrj2\" (UID: \"d70eafc1-c4e9-4f70-bb2c-0a4b7e54bef1\") " pod="openstack/aodh-bf0e-account-create-update-csrj2" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.819853 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a95f751-76e3-4289-abf1-0328e14e6ac8-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"5a95f751-76e3-4289-abf1-0328e14e6ac8\") " pod="openstack/nova-cell1-novncproxy-0" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.843159 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-99c5p\" (UniqueName: \"kubernetes.io/projected/5a95f751-76e3-4289-abf1-0328e14e6ac8-kube-api-access-99c5p\") pod \"nova-cell1-novncproxy-0\" (UID: \"5a95f751-76e3-4289-abf1-0328e14e6ac8\") " pod="openstack/nova-cell1-novncproxy-0" Feb 16 11:33:03 crc kubenswrapper[4949]: I0216 11:33:03.860705 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-77wvd\" (UniqueName: \"kubernetes.io/projected/d70eafc1-c4e9-4f70-bb2c-0a4b7e54bef1-kube-api-access-77wvd\") pod \"aodh-bf0e-account-create-update-csrj2\" (UID: \"d70eafc1-c4e9-4f70-bb2c-0a4b7e54bef1\") " pod="openstack/aodh-bf0e-account-create-update-csrj2" Feb 16 11:33:04 crc kubenswrapper[4949]: I0216 11:33:04.015889 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Feb 16 11:33:04 crc kubenswrapper[4949]: I0216 11:33:04.018668 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Feb 16 11:33:04 crc kubenswrapper[4949]: I0216 11:33:04.034798 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Feb 16 11:33:04 crc kubenswrapper[4949]: I0216 11:33:04.036032 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Feb 16 11:33:04 crc kubenswrapper[4949]: I0216 11:33:04.091565 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5fbc4d444f-4mz4p"] Feb 16 11:33:04 crc kubenswrapper[4949]: I0216 11:33:04.094911 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Feb 16 11:33:04 crc kubenswrapper[4949]: I0216 11:33:04.104086 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5fbc4d444f-4mz4p" Feb 16 11:33:04 crc kubenswrapper[4949]: I0216 11:33:04.108279 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5fbc4d444f-4mz4p"] Feb 16 11:33:04 crc kubenswrapper[4949]: I0216 11:33:04.121491 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4efbbb4-0e96-441e-a838-c5af61da8cd5-config-data\") pod \"nova-metadata-0\" (UID: \"e4efbbb4-0e96-441e-a838-c5af61da8cd5\") " pod="openstack/nova-metadata-0" Feb 16 11:33:04 crc kubenswrapper[4949]: I0216 11:33:04.121633 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e4efbbb4-0e96-441e-a838-c5af61da8cd5-logs\") pod \"nova-metadata-0\" (UID: \"e4efbbb4-0e96-441e-a838-c5af61da8cd5\") " pod="openstack/nova-metadata-0" Feb 16 11:33:04 crc kubenswrapper[4949]: I0216 11:33:04.121705 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4efbbb4-0e96-441e-a838-c5af61da8cd5-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"e4efbbb4-0e96-441e-a838-c5af61da8cd5\") " pod="openstack/nova-metadata-0" Feb 16 11:33:04 crc kubenswrapper[4949]: I0216 11:33:04.121816 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dn4mr\" (UniqueName: \"kubernetes.io/projected/e4efbbb4-0e96-441e-a838-c5af61da8cd5-kube-api-access-dn4mr\") pod \"nova-metadata-0\" (UID: \"e4efbbb4-0e96-441e-a838-c5af61da8cd5\") " pod="openstack/nova-metadata-0" Feb 16 11:33:04 crc kubenswrapper[4949]: I0216 11:33:04.131040 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-bf0e-account-create-update-csrj2" Feb 16 11:33:04 crc kubenswrapper[4949]: I0216 11:33:04.229154 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/679eb150-43d4-444a-8f51-738e92914fde-dns-svc\") pod \"dnsmasq-dns-5fbc4d444f-4mz4p\" (UID: \"679eb150-43d4-444a-8f51-738e92914fde\") " pod="openstack/dnsmasq-dns-5fbc4d444f-4mz4p" Feb 16 11:33:04 crc kubenswrapper[4949]: I0216 11:33:04.229343 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/679eb150-43d4-444a-8f51-738e92914fde-config\") pod \"dnsmasq-dns-5fbc4d444f-4mz4p\" (UID: \"679eb150-43d4-444a-8f51-738e92914fde\") " pod="openstack/dnsmasq-dns-5fbc4d444f-4mz4p" Feb 16 11:33:04 crc kubenswrapper[4949]: I0216 11:33:04.229368 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e4efbbb4-0e96-441e-a838-c5af61da8cd5-logs\") pod \"nova-metadata-0\" (UID: \"e4efbbb4-0e96-441e-a838-c5af61da8cd5\") " pod="openstack/nova-metadata-0" Feb 16 11:33:04 crc kubenswrapper[4949]: I0216 11:33:04.229473 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4efbbb4-0e96-441e-a838-c5af61da8cd5-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"e4efbbb4-0e96-441e-a838-c5af61da8cd5\") " pod="openstack/nova-metadata-0" Feb 16 11:33:04 crc kubenswrapper[4949]: I0216 11:33:04.229656 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dn4mr\" (UniqueName: \"kubernetes.io/projected/e4efbbb4-0e96-441e-a838-c5af61da8cd5-kube-api-access-dn4mr\") pod \"nova-metadata-0\" (UID: \"e4efbbb4-0e96-441e-a838-c5af61da8cd5\") " pod="openstack/nova-metadata-0" Feb 16 11:33:04 crc kubenswrapper[4949]: I0216 11:33:04.229727 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/679eb150-43d4-444a-8f51-738e92914fde-dns-swift-storage-0\") pod \"dnsmasq-dns-5fbc4d444f-4mz4p\" (UID: \"679eb150-43d4-444a-8f51-738e92914fde\") " pod="openstack/dnsmasq-dns-5fbc4d444f-4mz4p" Feb 16 11:33:04 crc kubenswrapper[4949]: I0216 11:33:04.229760 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wlvkr\" (UniqueName: \"kubernetes.io/projected/679eb150-43d4-444a-8f51-738e92914fde-kube-api-access-wlvkr\") pod \"dnsmasq-dns-5fbc4d444f-4mz4p\" (UID: \"679eb150-43d4-444a-8f51-738e92914fde\") " pod="openstack/dnsmasq-dns-5fbc4d444f-4mz4p" Feb 16 11:33:04 crc kubenswrapper[4949]: I0216 11:33:04.229875 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/679eb150-43d4-444a-8f51-738e92914fde-ovsdbserver-sb\") pod \"dnsmasq-dns-5fbc4d444f-4mz4p\" (UID: \"679eb150-43d4-444a-8f51-738e92914fde\") " pod="openstack/dnsmasq-dns-5fbc4d444f-4mz4p" Feb 16 11:33:04 crc kubenswrapper[4949]: I0216 11:33:04.229917 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4efbbb4-0e96-441e-a838-c5af61da8cd5-config-data\") pod \"nova-metadata-0\" (UID: \"e4efbbb4-0e96-441e-a838-c5af61da8cd5\") " pod="openstack/nova-metadata-0" 
Feb 16 11:33:04 crc kubenswrapper[4949]: I0216 11:33:04.229941 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/679eb150-43d4-444a-8f51-738e92914fde-ovsdbserver-nb\") pod \"dnsmasq-dns-5fbc4d444f-4mz4p\" (UID: \"679eb150-43d4-444a-8f51-738e92914fde\") " pod="openstack/dnsmasq-dns-5fbc4d444f-4mz4p" Feb 16 11:33:04 crc kubenswrapper[4949]: I0216 11:33:04.230395 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e4efbbb4-0e96-441e-a838-c5af61da8cd5-logs\") pod \"nova-metadata-0\" (UID: \"e4efbbb4-0e96-441e-a838-c5af61da8cd5\") " pod="openstack/nova-metadata-0" Feb 16 11:33:04 crc kubenswrapper[4949]: I0216 11:33:04.284471 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4efbbb4-0e96-441e-a838-c5af61da8cd5-config-data\") pod \"nova-metadata-0\" (UID: \"e4efbbb4-0e96-441e-a838-c5af61da8cd5\") " pod="openstack/nova-metadata-0" Feb 16 11:33:04 crc kubenswrapper[4949]: I0216 11:33:04.299243 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4efbbb4-0e96-441e-a838-c5af61da8cd5-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"e4efbbb4-0e96-441e-a838-c5af61da8cd5\") " pod="openstack/nova-metadata-0" Feb 16 11:33:04 crc kubenswrapper[4949]: I0216 11:33:04.310793 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dn4mr\" (UniqueName: \"kubernetes.io/projected/e4efbbb4-0e96-441e-a838-c5af61da8cd5-kube-api-access-dn4mr\") pod \"nova-metadata-0\" (UID: \"e4efbbb4-0e96-441e-a838-c5af61da8cd5\") " pod="openstack/nova-metadata-0" Feb 16 11:33:04 crc kubenswrapper[4949]: I0216 11:33:04.332104 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/679eb150-43d4-444a-8f51-738e92914fde-dns-svc\") pod \"dnsmasq-dns-5fbc4d444f-4mz4p\" (UID: \"679eb150-43d4-444a-8f51-738e92914fde\") " pod="openstack/dnsmasq-dns-5fbc4d444f-4mz4p" Feb 16 11:33:04 crc kubenswrapper[4949]: I0216 11:33:04.332223 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/679eb150-43d4-444a-8f51-738e92914fde-config\") pod \"dnsmasq-dns-5fbc4d444f-4mz4p\" (UID: \"679eb150-43d4-444a-8f51-738e92914fde\") " pod="openstack/dnsmasq-dns-5fbc4d444f-4mz4p" Feb 16 11:33:04 crc kubenswrapper[4949]: I0216 11:33:04.332371 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/679eb150-43d4-444a-8f51-738e92914fde-dns-swift-storage-0\") pod \"dnsmasq-dns-5fbc4d444f-4mz4p\" (UID: \"679eb150-43d4-444a-8f51-738e92914fde\") " pod="openstack/dnsmasq-dns-5fbc4d444f-4mz4p" Feb 16 11:33:04 crc kubenswrapper[4949]: I0216 11:33:04.332395 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wlvkr\" (UniqueName: \"kubernetes.io/projected/679eb150-43d4-444a-8f51-738e92914fde-kube-api-access-wlvkr\") pod \"dnsmasq-dns-5fbc4d444f-4mz4p\" (UID: \"679eb150-43d4-444a-8f51-738e92914fde\") " pod="openstack/dnsmasq-dns-5fbc4d444f-4mz4p" Feb 16 11:33:04 crc kubenswrapper[4949]: I0216 11:33:04.332459 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/679eb150-43d4-444a-8f51-738e92914fde-ovsdbserver-sb\") pod \"dnsmasq-dns-5fbc4d444f-4mz4p\" (UID: \"679eb150-43d4-444a-8f51-738e92914fde\") " pod="openstack/dnsmasq-dns-5fbc4d444f-4mz4p" Feb 16 11:33:04 crc kubenswrapper[4949]: I0216 11:33:04.332486 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/679eb150-43d4-444a-8f51-738e92914fde-ovsdbserver-nb\") pod \"dnsmasq-dns-5fbc4d444f-4mz4p\" (UID: \"679eb150-43d4-444a-8f51-738e92914fde\") " pod="openstack/dnsmasq-dns-5fbc4d444f-4mz4p" Feb 16 11:33:04 crc kubenswrapper[4949]: I0216 11:33:04.333382 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/679eb150-43d4-444a-8f51-738e92914fde-ovsdbserver-nb\") pod \"dnsmasq-dns-5fbc4d444f-4mz4p\" (UID: \"679eb150-43d4-444a-8f51-738e92914fde\") " pod="openstack/dnsmasq-dns-5fbc4d444f-4mz4p" Feb 16 11:33:04 crc kubenswrapper[4949]: I0216 11:33:04.333440 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/679eb150-43d4-444a-8f51-738e92914fde-dns-svc\") pod \"dnsmasq-dns-5fbc4d444f-4mz4p\" (UID: \"679eb150-43d4-444a-8f51-738e92914fde\") " pod="openstack/dnsmasq-dns-5fbc4d444f-4mz4p" Feb 16 11:33:04 crc kubenswrapper[4949]: I0216 11:33:04.333896 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/679eb150-43d4-444a-8f51-738e92914fde-config\") pod \"dnsmasq-dns-5fbc4d444f-4mz4p\" (UID: \"679eb150-43d4-444a-8f51-738e92914fde\") " pod="openstack/dnsmasq-dns-5fbc4d444f-4mz4p" Feb 16 11:33:04 crc kubenswrapper[4949]: I0216 11:33:04.336058 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/679eb150-43d4-444a-8f51-738e92914fde-ovsdbserver-sb\") pod \"dnsmasq-dns-5fbc4d444f-4mz4p\" (UID: \"679eb150-43d4-444a-8f51-738e92914fde\") " pod="openstack/dnsmasq-dns-5fbc4d444f-4mz4p" Feb 16 11:33:04 crc kubenswrapper[4949]: I0216 11:33:04.336112 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/679eb150-43d4-444a-8f51-738e92914fde-dns-swift-storage-0\") pod \"dnsmasq-dns-5fbc4d444f-4mz4p\" (UID: \"679eb150-43d4-444a-8f51-738e92914fde\") " pod="openstack/dnsmasq-dns-5fbc4d444f-4mz4p" Feb 16 11:33:04 crc kubenswrapper[4949]: I0216 11:33:04.352484 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-j9tzp"] Feb 16 11:33:04 crc kubenswrapper[4949]: I0216 11:33:04.359776 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wlvkr\" (UniqueName: \"kubernetes.io/projected/679eb150-43d4-444a-8f51-738e92914fde-kube-api-access-wlvkr\") pod \"dnsmasq-dns-5fbc4d444f-4mz4p\" (UID: \"679eb150-43d4-444a-8f51-738e92914fde\") " pod="openstack/dnsmasq-dns-5fbc4d444f-4mz4p" Feb 16 11:33:04 crc kubenswrapper[4949]: I0216 11:33:04.365067 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Feb 16 11:33:04 crc kubenswrapper[4949]: I0216 11:33:04.465589 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5fbc4d444f-4mz4p" Feb 16 11:33:04 crc kubenswrapper[4949]: I0216 11:33:04.565942 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 16 11:33:04 crc kubenswrapper[4949]: I0216 11:33:04.566357 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 16 11:33:04 crc kubenswrapper[4949]: I0216 11:33:04.789054 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-j9tzp" event={"ID":"6a1e0048-b760-4b8c-a65b-7f0224833721","Type":"ContainerStarted","Data":"85ad27306e3b0384d42221535e9e542ee3755efe9269b6003bae3367a0e19a5a"} Feb 16 11:33:05 crc kubenswrapper[4949]: I0216 11:33:05.181655 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Feb 16 11:33:05 crc kubenswrapper[4949]: I0216 11:33:05.285828 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-sgsmx"] Feb 16 11:33:05 crc kubenswrapper[4949]: I0216 11:33:05.356063 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Feb 16 11:33:05 crc kubenswrapper[4949]: I0216 11:33:05.819520 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"b6f11b9b-6fb0-4b7d-b6bf-15340c23d530","Type":"ContainerStarted","Data":"6c9be516b026e411e95e3bb728be946087fb9097a659a15644e4637157a34dec"} Feb 16 11:33:05 crc kubenswrapper[4949]: I0216 11:33:05.847350 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-sgsmx" event={"ID":"ddd1d576-d2cb-4f8b-aa99-9d31cd1d6f50","Type":"ContainerStarted","Data":"3c6dcee2df9ec41a1c4f4dd11f0181c87b6dc8ea304f8678fc30f6a1de7689a4"} Feb 16 11:33:05 crc kubenswrapper[4949]: I0216 11:33:05.847412 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-sgsmx" event={"ID":"ddd1d576-d2cb-4f8b-aa99-9d31cd1d6f50","Type":"ContainerStarted","Data":"9d9971473a5758f4f1a757e59fb80765ddda62e801a7cfc755f32982710b8846"} Feb 16 11:33:05 crc kubenswrapper[4949]: I0216 11:33:05.878287 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"9e673569-9ea0-48bd-8a23-d3766abebe4d","Type":"ContainerStarted","Data":"5080d2f94ddde178c55a62b04cfc8eca0a0d25d5af14c34a3d1a4990fe9847a0"} Feb 16 11:33:05 crc kubenswrapper[4949]: I0216 11:33:05.885290 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-j9tzp" event={"ID":"6a1e0048-b760-4b8c-a65b-7f0224833721","Type":"ContainerStarted","Data":"6b827f03206bb4c4e85fc8cee05019ee2e5acc8d8bbdbfe9b7315957449e82e1"} Feb 16 11:33:05 crc kubenswrapper[4949]: I0216 11:33:05.886924 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-db-create-sgsmx" podStartSLOduration=2.886905744 podStartE2EDuration="2.886905744s" podCreationTimestamp="2026-02-16 11:33:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 
11:33:05.879251525 +0000 UTC m=+1575.508585710" watchObservedRunningTime="2026-02-16 11:33:05.886905744 +0000 UTC m=+1575.516239909" Feb 16 11:33:05 crc kubenswrapper[4949]: I0216 11:33:05.992769 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-j9tzp" podStartSLOduration=3.992744091 podStartE2EDuration="3.992744091s" podCreationTimestamp="2026-02-16 11:33:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:33:05.925003413 +0000 UTC m=+1575.554337588" watchObservedRunningTime="2026-02-16 11:33:05.992744091 +0000 UTC m=+1575.622078246" Feb 16 11:33:05 crc kubenswrapper[4949]: I0216 11:33:05.995979 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Feb 16 11:33:06 crc kubenswrapper[4949]: I0216 11:33:06.029902 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Feb 16 11:33:06 crc kubenswrapper[4949]: I0216 11:33:06.058374 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-bf0e-account-create-update-csrj2"] Feb 16 11:33:06 crc kubenswrapper[4949]: I0216 11:33:06.089978 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5fbc4d444f-4mz4p"] Feb 16 11:33:06 crc kubenswrapper[4949]: I0216 11:33:06.287424 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-hm2m6"] Feb 16 11:33:06 crc kubenswrapper[4949]: I0216 11:33:06.290034 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-hm2m6" Feb 16 11:33:06 crc kubenswrapper[4949]: I0216 11:33:06.298008 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Feb 16 11:33:06 crc kubenswrapper[4949]: I0216 11:33:06.298404 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Feb 16 11:33:06 crc kubenswrapper[4949]: I0216 11:33:06.312975 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-hm2m6"] Feb 16 11:33:06 crc kubenswrapper[4949]: I0216 11:33:06.409319 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0bd872f-2560-48de-997e-a2ce07f4f0b7-config-data\") pod \"nova-cell1-conductor-db-sync-hm2m6\" (UID: \"a0bd872f-2560-48de-997e-a2ce07f4f0b7\") " pod="openstack/nova-cell1-conductor-db-sync-hm2m6" Feb 16 11:33:06 crc kubenswrapper[4949]: I0216 11:33:06.409752 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0bd872f-2560-48de-997e-a2ce07f4f0b7-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-hm2m6\" (UID: \"a0bd872f-2560-48de-997e-a2ce07f4f0b7\") " pod="openstack/nova-cell1-conductor-db-sync-hm2m6" Feb 16 11:33:06 crc kubenswrapper[4949]: I0216 11:33:06.409969 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7bjvz\" (UniqueName: \"kubernetes.io/projected/a0bd872f-2560-48de-997e-a2ce07f4f0b7-kube-api-access-7bjvz\") pod \"nova-cell1-conductor-db-sync-hm2m6\" (UID: \"a0bd872f-2560-48de-997e-a2ce07f4f0b7\") " pod="openstack/nova-cell1-conductor-db-sync-hm2m6" Feb 16 11:33:06 crc kubenswrapper[4949]: I0216 
11:33:06.467345 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a0bd872f-2560-48de-997e-a2ce07f4f0b7-scripts\") pod \"nova-cell1-conductor-db-sync-hm2m6\" (UID: \"a0bd872f-2560-48de-997e-a2ce07f4f0b7\") " pod="openstack/nova-cell1-conductor-db-sync-hm2m6" Feb 16 11:33:06 crc kubenswrapper[4949]: I0216 11:33:06.571401 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0bd872f-2560-48de-997e-a2ce07f4f0b7-config-data\") pod \"nova-cell1-conductor-db-sync-hm2m6\" (UID: \"a0bd872f-2560-48de-997e-a2ce07f4f0b7\") " pod="openstack/nova-cell1-conductor-db-sync-hm2m6" Feb 16 11:33:06 crc kubenswrapper[4949]: I0216 11:33:06.571792 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0bd872f-2560-48de-997e-a2ce07f4f0b7-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-hm2m6\" (UID: \"a0bd872f-2560-48de-997e-a2ce07f4f0b7\") " pod="openstack/nova-cell1-conductor-db-sync-hm2m6" Feb 16 11:33:06 crc kubenswrapper[4949]: I0216 11:33:06.571872 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7bjvz\" (UniqueName: \"kubernetes.io/projected/a0bd872f-2560-48de-997e-a2ce07f4f0b7-kube-api-access-7bjvz\") pod \"nova-cell1-conductor-db-sync-hm2m6\" (UID: \"a0bd872f-2560-48de-997e-a2ce07f4f0b7\") " pod="openstack/nova-cell1-conductor-db-sync-hm2m6" Feb 16 11:33:06 crc kubenswrapper[4949]: I0216 11:33:06.571990 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a0bd872f-2560-48de-997e-a2ce07f4f0b7-scripts\") pod \"nova-cell1-conductor-db-sync-hm2m6\" (UID: \"a0bd872f-2560-48de-997e-a2ce07f4f0b7\") " pod="openstack/nova-cell1-conductor-db-sync-hm2m6" Feb 16 11:33:06 crc kubenswrapper[4949]: I0216 11:33:06.581871 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a0bd872f-2560-48de-997e-a2ce07f4f0b7-scripts\") pod \"nova-cell1-conductor-db-sync-hm2m6\" (UID: \"a0bd872f-2560-48de-997e-a2ce07f4f0b7\") " pod="openstack/nova-cell1-conductor-db-sync-hm2m6" Feb 16 11:33:06 crc kubenswrapper[4949]: I0216 11:33:06.598268 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7bjvz\" (UniqueName: \"kubernetes.io/projected/a0bd872f-2560-48de-997e-a2ce07f4f0b7-kube-api-access-7bjvz\") pod \"nova-cell1-conductor-db-sync-hm2m6\" (UID: \"a0bd872f-2560-48de-997e-a2ce07f4f0b7\") " pod="openstack/nova-cell1-conductor-db-sync-hm2m6" Feb 16 11:33:06 crc kubenswrapper[4949]: I0216 11:33:06.598421 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0bd872f-2560-48de-997e-a2ce07f4f0b7-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-hm2m6\" (UID: \"a0bd872f-2560-48de-997e-a2ce07f4f0b7\") " pod="openstack/nova-cell1-conductor-db-sync-hm2m6" Feb 16 11:33:06 crc kubenswrapper[4949]: I0216 11:33:06.608572 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0bd872f-2560-48de-997e-a2ce07f4f0b7-config-data\") pod \"nova-cell1-conductor-db-sync-hm2m6\" (UID: \"a0bd872f-2560-48de-997e-a2ce07f4f0b7\") " pod="openstack/nova-cell1-conductor-db-sync-hm2m6" Feb 16 11:33:06 crc 
kubenswrapper[4949]: I0216 11:33:06.693552 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-hm2m6" Feb 16 11:33:06 crc kubenswrapper[4949]: I0216 11:33:06.930224 4949 generic.go:334] "Generic (PLEG): container finished" podID="ddd1d576-d2cb-4f8b-aa99-9d31cd1d6f50" containerID="3c6dcee2df9ec41a1c4f4dd11f0181c87b6dc8ea304f8678fc30f6a1de7689a4" exitCode=0 Feb 16 11:33:06 crc kubenswrapper[4949]: I0216 11:33:06.930337 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-sgsmx" event={"ID":"ddd1d576-d2cb-4f8b-aa99-9d31cd1d6f50","Type":"ContainerDied","Data":"3c6dcee2df9ec41a1c4f4dd11f0181c87b6dc8ea304f8678fc30f6a1de7689a4"} Feb 16 11:33:06 crc kubenswrapper[4949]: I0216 11:33:06.964964 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-bf0e-account-create-update-csrj2" event={"ID":"d70eafc1-c4e9-4f70-bb2c-0a4b7e54bef1","Type":"ContainerStarted","Data":"b787d4f7ac302d60713add31accf46680c357ad5ce0ab8cb98e19dadc1700aaa"} Feb 16 11:33:06 crc kubenswrapper[4949]: I0216 11:33:06.965290 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-bf0e-account-create-update-csrj2" event={"ID":"d70eafc1-c4e9-4f70-bb2c-0a4b7e54bef1","Type":"ContainerStarted","Data":"eae75cd4c776e215edb1fd6c6a55ccb2d55af596fe25debe96513d0f5594893b"} Feb 16 11:33:06 crc kubenswrapper[4949]: I0216 11:33:06.971612 4949 generic.go:334] "Generic (PLEG): container finished" podID="679eb150-43d4-444a-8f51-738e92914fde" containerID="68fe340601768a70e7c6beb6b1215686b60086aa4e5a1eef7f4053cd0aacb042" exitCode=0 Feb 16 11:33:06 crc kubenswrapper[4949]: I0216 11:33:06.971712 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fbc4d444f-4mz4p" event={"ID":"679eb150-43d4-444a-8f51-738e92914fde","Type":"ContainerDied","Data":"68fe340601768a70e7c6beb6b1215686b60086aa4e5a1eef7f4053cd0aacb042"} Feb 16 11:33:06 crc kubenswrapper[4949]: I0216 11:33:06.971741 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fbc4d444f-4mz4p" event={"ID":"679eb150-43d4-444a-8f51-738e92914fde","Type":"ContainerStarted","Data":"f20f146d1fd7c13096e2d96649230a04bb1c3c5943ff83408886e1e4bd356465"} Feb 16 11:33:06 crc kubenswrapper[4949]: I0216 11:33:06.992490 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"5a95f751-76e3-4289-abf1-0328e14e6ac8","Type":"ContainerStarted","Data":"f7d0bdd8536a8b3472848e665be42b8d75b79433beb8888d5796e789bb0f80ae"} Feb 16 11:33:07 crc kubenswrapper[4949]: I0216 11:33:07.013018 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e4efbbb4-0e96-441e-a838-c5af61da8cd5","Type":"ContainerStarted","Data":"ed74b4eacbeb9a612e51391670577d9f8a9c75ff2efa1b49b480cfff70349c06"} Feb 16 11:33:07 crc kubenswrapper[4949]: I0216 11:33:07.046528 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-bf0e-account-create-update-csrj2" podStartSLOduration=4.046501829 podStartE2EDuration="4.046501829s" podCreationTimestamp="2026-02-16 11:33:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:33:06.986994707 +0000 UTC m=+1576.616328872" watchObservedRunningTime="2026-02-16 11:33:07.046501829 +0000 UTC m=+1576.675835994" Feb 16 11:33:07 crc kubenswrapper[4949]: I0216 11:33:07.386500 4949 kubelet.go:2437] "SyncLoop DELETE" 
source="api" pods=["openstack/nova-metadata-0"] Feb 16 11:33:07 crc kubenswrapper[4949]: I0216 11:33:07.434299 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Feb 16 11:33:07 crc kubenswrapper[4949]: I0216 11:33:07.640275 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-hm2m6"] Feb 16 11:33:08 crc kubenswrapper[4949]: I0216 11:33:08.049933 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-hm2m6" event={"ID":"a0bd872f-2560-48de-997e-a2ce07f4f0b7","Type":"ContainerStarted","Data":"ba8ab6c66a20430c703e5c48bd3835c082ca95f7daaae276285a64f3466720d9"} Feb 16 11:33:08 crc kubenswrapper[4949]: I0216 11:33:08.050349 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-hm2m6" event={"ID":"a0bd872f-2560-48de-997e-a2ce07f4f0b7","Type":"ContainerStarted","Data":"6712ca4aa4e9379cb141283c0a3bb82f50c93ab1821dc32a911f543f3ab9d0a7"} Feb 16 11:33:08 crc kubenswrapper[4949]: I0216 11:33:08.052739 4949 generic.go:334] "Generic (PLEG): container finished" podID="d70eafc1-c4e9-4f70-bb2c-0a4b7e54bef1" containerID="b787d4f7ac302d60713add31accf46680c357ad5ce0ab8cb98e19dadc1700aaa" exitCode=0 Feb 16 11:33:08 crc kubenswrapper[4949]: I0216 11:33:08.052784 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-bf0e-account-create-update-csrj2" event={"ID":"d70eafc1-c4e9-4f70-bb2c-0a4b7e54bef1","Type":"ContainerDied","Data":"b787d4f7ac302d60713add31accf46680c357ad5ce0ab8cb98e19dadc1700aaa"} Feb 16 11:33:08 crc kubenswrapper[4949]: I0216 11:33:08.056686 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fbc4d444f-4mz4p" event={"ID":"679eb150-43d4-444a-8f51-738e92914fde","Type":"ContainerStarted","Data":"ae3369b1c59205788426355296ba345952b253e703e844a023bd92ef87423def"} Feb 16 11:33:08 crc kubenswrapper[4949]: I0216 11:33:08.056734 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5fbc4d444f-4mz4p" Feb 16 11:33:08 crc kubenswrapper[4949]: I0216 11:33:08.071022 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-hm2m6" podStartSLOduration=2.070994791 podStartE2EDuration="2.070994791s" podCreationTimestamp="2026-02-16 11:33:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:33:08.063658591 +0000 UTC m=+1577.692992776" watchObservedRunningTime="2026-02-16 11:33:08.070994791 +0000 UTC m=+1577.700328956" Feb 16 11:33:08 crc kubenswrapper[4949]: I0216 11:33:08.114628 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5fbc4d444f-4mz4p" podStartSLOduration=5.114605238 podStartE2EDuration="5.114605238s" podCreationTimestamp="2026-02-16 11:33:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:33:08.105454966 +0000 UTC m=+1577.734789131" watchObservedRunningTime="2026-02-16 11:33:08.114605238 +0000 UTC m=+1577.743939403" Feb 16 11:33:10 crc kubenswrapper[4949]: W0216 11:33:10.387758 4949 watcher.go:93] Error while processing event ("/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podddd1d576_d2cb_4f8b_aa99_9d31cd1d6f50.slice/crio-conmon-3c6dcee2df9ec41a1c4f4dd11f0181c87b6dc8ea304f8678fc30f6a1de7689a4.scope": 
0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podddd1d576_d2cb_4f8b_aa99_9d31cd1d6f50.slice/crio-conmon-3c6dcee2df9ec41a1c4f4dd11f0181c87b6dc8ea304f8678fc30f6a1de7689a4.scope: no such file or directory Feb 16 11:33:10 crc kubenswrapper[4949]: W0216 11:33:10.388296 4949 watcher.go:93] Error while processing event ("/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podddd1d576_d2cb_4f8b_aa99_9d31cd1d6f50.slice/crio-3c6dcee2df9ec41a1c4f4dd11f0181c87b6dc8ea304f8678fc30f6a1de7689a4.scope": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podddd1d576_d2cb_4f8b_aa99_9d31cd1d6f50.slice/crio-3c6dcee2df9ec41a1c4f4dd11f0181c87b6dc8ea304f8678fc30f6a1de7689a4.scope: no such file or directory Feb 16 11:33:10 crc kubenswrapper[4949]: E0216 11:33:10.399612 4949 manager.go:1116] Failed to create existing container: /kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod91104758_7e02_4761_bc39_fbca029cda0f.slice/crio-c402755e8d8403d1ea26da2b690414f83754823c8798ca2757b6ddd22ae9dbdc: Error finding container c402755e8d8403d1ea26da2b690414f83754823c8798ca2757b6ddd22ae9dbdc: Status 404 returned error can't find the container with id c402755e8d8403d1ea26da2b690414f83754823c8798ca2757b6ddd22ae9dbdc Feb 16 11:33:10 crc kubenswrapper[4949]: W0216 11:33:10.411535 4949 watcher.go:93] Error while processing event ("/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd70eafc1_c4e9_4f70_bb2c_0a4b7e54bef1.slice/crio-conmon-b787d4f7ac302d60713add31accf46680c357ad5ce0ab8cb98e19dadc1700aaa.scope": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd70eafc1_c4e9_4f70_bb2c_0a4b7e54bef1.slice/crio-conmon-b787d4f7ac302d60713add31accf46680c357ad5ce0ab8cb98e19dadc1700aaa.scope: no such file or directory Feb 16 11:33:10 crc kubenswrapper[4949]: W0216 11:33:10.411601 4949 watcher.go:93] Error while processing event ("/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod679eb150_43d4_444a_8f51_738e92914fde.slice/crio-conmon-68fe340601768a70e7c6beb6b1215686b60086aa4e5a1eef7f4053cd0aacb042.scope": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod679eb150_43d4_444a_8f51_738e92914fde.slice/crio-conmon-68fe340601768a70e7c6beb6b1215686b60086aa4e5a1eef7f4053cd0aacb042.scope: no such file or directory Feb 16 11:33:10 crc kubenswrapper[4949]: W0216 11:33:10.411627 4949 watcher.go:93] Error while processing event ("/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd70eafc1_c4e9_4f70_bb2c_0a4b7e54bef1.slice/crio-b787d4f7ac302d60713add31accf46680c357ad5ce0ab8cb98e19dadc1700aaa.scope": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd70eafc1_c4e9_4f70_bb2c_0a4b7e54bef1.slice/crio-b787d4f7ac302d60713add31accf46680c357ad5ce0ab8cb98e19dadc1700aaa.scope: no such file or directory Feb 16 11:33:10 crc kubenswrapper[4949]: W0216 11:33:10.417008 4949 watcher.go:93] Error while processing event ("/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod679eb150_43d4_444a_8f51_738e92914fde.slice/crio-68fe340601768a70e7c6beb6b1215686b60086aa4e5a1eef7f4053cd0aacb042.scope": 0x40000100 == IN_CREATE|IN_ISDIR): 
inotify_add_watch /sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod679eb150_43d4_444a_8f51_738e92914fde.slice/crio-68fe340601768a70e7c6beb6b1215686b60086aa4e5a1eef7f4053cd0aacb042.scope: no such file or directory Feb 16 11:33:10 crc kubenswrapper[4949]: E0216 11:33:10.574336 4949 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0794dfd5_3a8f_4e35_bb3e_2dc32881680d.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0794dfd5_3a8f_4e35_bb3e_2dc32881680d.slice/crio-conmon-3c591b2c1c7369149a189d0b2cc914d3fc49e8cb2cac8eaa164a499c20cf9927.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0794dfd5_3a8f_4e35_bb3e_2dc32881680d.slice/crio-3c591b2c1c7369149a189d0b2cc914d3fc49e8cb2cac8eaa164a499c20cf9927.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0794dfd5_3a8f_4e35_bb3e_2dc32881680d.slice/crio-def0b4871a984223bbeb5422cb7e7189ee521068235a7384de75f97e247dde34\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda4c12d9b_68b8_4a21_9dfc_d1fae847db6a.slice/crio-b11b280b282424897374c958f3926574db7995a402b9cf563744e1236b51989c.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda4c12d9b_68b8_4a21_9dfc_d1fae847db6a.slice/crio-conmon-b11b280b282424897374c958f3926574db7995a402b9cf563744e1236b51989c.scope\": RecentStats: unable to find data in memory cache]" Feb 16 11:33:10 crc kubenswrapper[4949]: E0216 11:33:10.577770 4949 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda4c12d9b_68b8_4a21_9dfc_d1fae847db6a.slice/crio-b11b280b282424897374c958f3926574db7995a402b9cf563744e1236b51989c.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod91104758_7e02_4761_bc39_fbca029cda0f.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda4c12d9b_68b8_4a21_9dfc_d1fae847db6a.slice/crio-conmon-b11b280b282424897374c958f3926574db7995a402b9cf563744e1236b51989c.scope\": RecentStats: unable to find data in memory cache]" Feb 16 11:33:11 crc kubenswrapper[4949]: I0216 11:33:11.165529 4949 generic.go:334] "Generic (PLEG): container finished" podID="a4c12d9b-68b8-4a21-9dfc-d1fae847db6a" containerID="b11b280b282424897374c958f3926574db7995a402b9cf563744e1236b51989c" exitCode=137 Feb 16 11:33:11 crc kubenswrapper[4949]: I0216 11:33:11.165635 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a4c12d9b-68b8-4a21-9dfc-d1fae847db6a","Type":"ContainerDied","Data":"b11b280b282424897374c958f3926574db7995a402b9cf563744e1236b51989c"} Feb 16 11:33:11 crc kubenswrapper[4949]: I0216 11:33:11.171518 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-sgsmx" event={"ID":"ddd1d576-d2cb-4f8b-aa99-9d31cd1d6f50","Type":"ContainerDied","Data":"9d9971473a5758f4f1a757e59fb80765ddda62e801a7cfc755f32982710b8846"} Feb 16 11:33:11 crc kubenswrapper[4949]: I0216 11:33:11.171837 4949 
pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9d9971473a5758f4f1a757e59fb80765ddda62e801a7cfc755f32982710b8846" Feb 16 11:33:11 crc kubenswrapper[4949]: I0216 11:33:11.178861 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-bf0e-account-create-update-csrj2" event={"ID":"d70eafc1-c4e9-4f70-bb2c-0a4b7e54bef1","Type":"ContainerDied","Data":"eae75cd4c776e215edb1fd6c6a55ccb2d55af596fe25debe96513d0f5594893b"} Feb 16 11:33:11 crc kubenswrapper[4949]: I0216 11:33:11.178907 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eae75cd4c776e215edb1fd6c6a55ccb2d55af596fe25debe96513d0f5594893b" Feb 16 11:33:11 crc kubenswrapper[4949]: I0216 11:33:11.219982 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-sgsmx" Feb 16 11:33:11 crc kubenswrapper[4949]: I0216 11:33:11.229689 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-bf0e-account-create-update-csrj2" Feb 16 11:33:11 crc kubenswrapper[4949]: I0216 11:33:11.393915 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h4phn\" (UniqueName: \"kubernetes.io/projected/ddd1d576-d2cb-4f8b-aa99-9d31cd1d6f50-kube-api-access-h4phn\") pod \"ddd1d576-d2cb-4f8b-aa99-9d31cd1d6f50\" (UID: \"ddd1d576-d2cb-4f8b-aa99-9d31cd1d6f50\") " Feb 16 11:33:11 crc kubenswrapper[4949]: I0216 11:33:11.396590 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-77wvd\" (UniqueName: \"kubernetes.io/projected/d70eafc1-c4e9-4f70-bb2c-0a4b7e54bef1-kube-api-access-77wvd\") pod \"d70eafc1-c4e9-4f70-bb2c-0a4b7e54bef1\" (UID: \"d70eafc1-c4e9-4f70-bb2c-0a4b7e54bef1\") " Feb 16 11:33:11 crc kubenswrapper[4949]: I0216 11:33:11.396655 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d70eafc1-c4e9-4f70-bb2c-0a4b7e54bef1-operator-scripts\") pod \"d70eafc1-c4e9-4f70-bb2c-0a4b7e54bef1\" (UID: \"d70eafc1-c4e9-4f70-bb2c-0a4b7e54bef1\") " Feb 16 11:33:11 crc kubenswrapper[4949]: I0216 11:33:11.396729 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ddd1d576-d2cb-4f8b-aa99-9d31cd1d6f50-operator-scripts\") pod \"ddd1d576-d2cb-4f8b-aa99-9d31cd1d6f50\" (UID: \"ddd1d576-d2cb-4f8b-aa99-9d31cd1d6f50\") " Feb 16 11:33:11 crc kubenswrapper[4949]: I0216 11:33:11.399206 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ddd1d576-d2cb-4f8b-aa99-9d31cd1d6f50-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ddd1d576-d2cb-4f8b-aa99-9d31cd1d6f50" (UID: "ddd1d576-d2cb-4f8b-aa99-9d31cd1d6f50"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:33:11 crc kubenswrapper[4949]: I0216 11:33:11.400939 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d70eafc1-c4e9-4f70-bb2c-0a4b7e54bef1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d70eafc1-c4e9-4f70-bb2c-0a4b7e54bef1" (UID: "d70eafc1-c4e9-4f70-bb2c-0a4b7e54bef1"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:33:11 crc kubenswrapper[4949]: I0216 11:33:11.432614 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ddd1d576-d2cb-4f8b-aa99-9d31cd1d6f50-kube-api-access-h4phn" (OuterVolumeSpecName: "kube-api-access-h4phn") pod "ddd1d576-d2cb-4f8b-aa99-9d31cd1d6f50" (UID: "ddd1d576-d2cb-4f8b-aa99-9d31cd1d6f50"). InnerVolumeSpecName "kube-api-access-h4phn". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:33:11 crc kubenswrapper[4949]: I0216 11:33:11.432762 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d70eafc1-c4e9-4f70-bb2c-0a4b7e54bef1-kube-api-access-77wvd" (OuterVolumeSpecName: "kube-api-access-77wvd") pod "d70eafc1-c4e9-4f70-bb2c-0a4b7e54bef1" (UID: "d70eafc1-c4e9-4f70-bb2c-0a4b7e54bef1"). InnerVolumeSpecName "kube-api-access-77wvd". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:33:11 crc kubenswrapper[4949]: I0216 11:33:11.468629 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 16 11:33:11 crc kubenswrapper[4949]: I0216 11:33:11.503474 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a4c12d9b-68b8-4a21-9dfc-d1fae847db6a-log-httpd\") pod \"a4c12d9b-68b8-4a21-9dfc-d1fae847db6a\" (UID: \"a4c12d9b-68b8-4a21-9dfc-d1fae847db6a\") " Feb 16 11:33:11 crc kubenswrapper[4949]: I0216 11:33:11.503528 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4c12d9b-68b8-4a21-9dfc-d1fae847db6a-combined-ca-bundle\") pod \"a4c12d9b-68b8-4a21-9dfc-d1fae847db6a\" (UID: \"a4c12d9b-68b8-4a21-9dfc-d1fae847db6a\") " Feb 16 11:33:11 crc kubenswrapper[4949]: I0216 11:33:11.503638 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v5whd\" (UniqueName: \"kubernetes.io/projected/a4c12d9b-68b8-4a21-9dfc-d1fae847db6a-kube-api-access-v5whd\") pod \"a4c12d9b-68b8-4a21-9dfc-d1fae847db6a\" (UID: \"a4c12d9b-68b8-4a21-9dfc-d1fae847db6a\") " Feb 16 11:33:11 crc kubenswrapper[4949]: I0216 11:33:11.503670 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a4c12d9b-68b8-4a21-9dfc-d1fae847db6a-sg-core-conf-yaml\") pod \"a4c12d9b-68b8-4a21-9dfc-d1fae847db6a\" (UID: \"a4c12d9b-68b8-4a21-9dfc-d1fae847db6a\") " Feb 16 11:33:11 crc kubenswrapper[4949]: I0216 11:33:11.503833 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a4c12d9b-68b8-4a21-9dfc-d1fae847db6a-config-data\") pod \"a4c12d9b-68b8-4a21-9dfc-d1fae847db6a\" (UID: \"a4c12d9b-68b8-4a21-9dfc-d1fae847db6a\") " Feb 16 11:33:11 crc kubenswrapper[4949]: I0216 11:33:11.503862 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a4c12d9b-68b8-4a21-9dfc-d1fae847db6a-run-httpd\") pod \"a4c12d9b-68b8-4a21-9dfc-d1fae847db6a\" (UID: \"a4c12d9b-68b8-4a21-9dfc-d1fae847db6a\") " Feb 16 11:33:11 crc kubenswrapper[4949]: I0216 11:33:11.503924 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a4c12d9b-68b8-4a21-9dfc-d1fae847db6a-scripts\") pod 
\"a4c12d9b-68b8-4a21-9dfc-d1fae847db6a\" (UID: \"a4c12d9b-68b8-4a21-9dfc-d1fae847db6a\") " Feb 16 11:33:11 crc kubenswrapper[4949]: I0216 11:33:11.505631 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h4phn\" (UniqueName: \"kubernetes.io/projected/ddd1d576-d2cb-4f8b-aa99-9d31cd1d6f50-kube-api-access-h4phn\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:11 crc kubenswrapper[4949]: I0216 11:33:11.505652 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-77wvd\" (UniqueName: \"kubernetes.io/projected/d70eafc1-c4e9-4f70-bb2c-0a4b7e54bef1-kube-api-access-77wvd\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:11 crc kubenswrapper[4949]: I0216 11:33:11.505662 4949 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d70eafc1-c4e9-4f70-bb2c-0a4b7e54bef1-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:11 crc kubenswrapper[4949]: I0216 11:33:11.505672 4949 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ddd1d576-d2cb-4f8b-aa99-9d31cd1d6f50-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:11 crc kubenswrapper[4949]: I0216 11:33:11.505973 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a4c12d9b-68b8-4a21-9dfc-d1fae847db6a-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "a4c12d9b-68b8-4a21-9dfc-d1fae847db6a" (UID: "a4c12d9b-68b8-4a21-9dfc-d1fae847db6a"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:33:11 crc kubenswrapper[4949]: I0216 11:33:11.508793 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a4c12d9b-68b8-4a21-9dfc-d1fae847db6a-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "a4c12d9b-68b8-4a21-9dfc-d1fae847db6a" (UID: "a4c12d9b-68b8-4a21-9dfc-d1fae847db6a"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:33:11 crc kubenswrapper[4949]: I0216 11:33:11.524556 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4c12d9b-68b8-4a21-9dfc-d1fae847db6a-scripts" (OuterVolumeSpecName: "scripts") pod "a4c12d9b-68b8-4a21-9dfc-d1fae847db6a" (UID: "a4c12d9b-68b8-4a21-9dfc-d1fae847db6a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:33:11 crc kubenswrapper[4949]: I0216 11:33:11.524871 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a4c12d9b-68b8-4a21-9dfc-d1fae847db6a-kube-api-access-v5whd" (OuterVolumeSpecName: "kube-api-access-v5whd") pod "a4c12d9b-68b8-4a21-9dfc-d1fae847db6a" (UID: "a4c12d9b-68b8-4a21-9dfc-d1fae847db6a"). InnerVolumeSpecName "kube-api-access-v5whd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:33:11 crc kubenswrapper[4949]: I0216 11:33:11.608567 4949 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a4c12d9b-68b8-4a21-9dfc-d1fae847db6a-log-httpd\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:11 crc kubenswrapper[4949]: I0216 11:33:11.608610 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v5whd\" (UniqueName: \"kubernetes.io/projected/a4c12d9b-68b8-4a21-9dfc-d1fae847db6a-kube-api-access-v5whd\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:11 crc kubenswrapper[4949]: I0216 11:33:11.608623 4949 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a4c12d9b-68b8-4a21-9dfc-d1fae847db6a-run-httpd\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:11 crc kubenswrapper[4949]: I0216 11:33:11.608632 4949 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a4c12d9b-68b8-4a21-9dfc-d1fae847db6a-scripts\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:11 crc kubenswrapper[4949]: I0216 11:33:11.732162 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4c12d9b-68b8-4a21-9dfc-d1fae847db6a-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "a4c12d9b-68b8-4a21-9dfc-d1fae847db6a" (UID: "a4c12d9b-68b8-4a21-9dfc-d1fae847db6a"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:33:11 crc kubenswrapper[4949]: I0216 11:33:11.784816 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4c12d9b-68b8-4a21-9dfc-d1fae847db6a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a4c12d9b-68b8-4a21-9dfc-d1fae847db6a" (UID: "a4c12d9b-68b8-4a21-9dfc-d1fae847db6a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:33:11 crc kubenswrapper[4949]: I0216 11:33:11.814201 4949 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a4c12d9b-68b8-4a21-9dfc-d1fae847db6a-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:11 crc kubenswrapper[4949]: I0216 11:33:11.814233 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4c12d9b-68b8-4a21-9dfc-d1fae847db6a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:11 crc kubenswrapper[4949]: I0216 11:33:11.910151 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4c12d9b-68b8-4a21-9dfc-d1fae847db6a-config-data" (OuterVolumeSpecName: "config-data") pod "a4c12d9b-68b8-4a21-9dfc-d1fae847db6a" (UID: "a4c12d9b-68b8-4a21-9dfc-d1fae847db6a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:33:11 crc kubenswrapper[4949]: I0216 11:33:11.917687 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a4c12d9b-68b8-4a21-9dfc-d1fae847db6a-config-data\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.208597 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"5a95f751-76e3-4289-abf1-0328e14e6ac8","Type":"ContainerStarted","Data":"e387f4a5f489eda015be94c35737cf691a286fb27aac46cd361b9541080f2962"} Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.208681 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="5a95f751-76e3-4289-abf1-0328e14e6ac8" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://e387f4a5f489eda015be94c35737cf691a286fb27aac46cd361b9541080f2962" gracePeriod=30 Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.214221 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"b6f11b9b-6fb0-4b7d-b6bf-15340c23d530","Type":"ContainerStarted","Data":"934e0ddd69595392eb7a07f84670c395bd782ce3a941b7cbf57d80942351f48c"} Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.223606 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e4efbbb4-0e96-441e-a838-c5af61da8cd5","Type":"ContainerStarted","Data":"e49cd6158108fb2bf9fc5ccfbf9f70c7c45791adfb015fa8e73ecd6a46d90c62"} Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.223948 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e4efbbb4-0e96-441e-a838-c5af61da8cd5","Type":"ContainerStarted","Data":"078d2290d421b47a707eb2c33501a3cc920656883d53e1acb613ff2ad3f07692"} Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.223867 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="e4efbbb4-0e96-441e-a838-c5af61da8cd5" containerName="nova-metadata-metadata" containerID="cri-o://e49cd6158108fb2bf9fc5ccfbf9f70c7c45791adfb015fa8e73ecd6a46d90c62" gracePeriod=30 Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.223829 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="e4efbbb4-0e96-441e-a838-c5af61da8cd5" containerName="nova-metadata-log" containerID="cri-o://078d2290d421b47a707eb2c33501a3cc920656883d53e1acb613ff2ad3f07692" gracePeriod=30 Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.243328 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=4.216894062 podStartE2EDuration="9.243300261s" podCreationTimestamp="2026-02-16 11:33:03 +0000 UTC" firstStartedPulling="2026-02-16 11:33:05.997820906 +0000 UTC m=+1575.627155071" lastFinishedPulling="2026-02-16 11:33:11.024227105 +0000 UTC m=+1580.653561270" observedRunningTime="2026-02-16 11:33:12.233653155 +0000 UTC m=+1581.862987320" watchObservedRunningTime="2026-02-16 11:33:12.243300261 +0000 UTC m=+1581.872634416" Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.247114 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a4c12d9b-68b8-4a21-9dfc-d1fae847db6a","Type":"ContainerDied","Data":"a7cbfa8e4894c2f9e44f08f8964ad6e4dcbc823fb1ce59e7d0b53a0b8bed1c70"} Feb 16 11:33:12 crc 
kubenswrapper[4949]: I0216 11:33:12.247176 4949 scope.go:117] "RemoveContainer" containerID="b11b280b282424897374c958f3926574db7995a402b9cf563744e1236b51989c" Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.247382 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.273002 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-sgsmx" Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.277176 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"9e673569-9ea0-48bd-8a23-d3766abebe4d","Type":"ContainerStarted","Data":"3a8c7afa2b8e3c8d0b605cede3d3c396fcc6dda0e30d00c61ed7a18c4e892f87"} Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.277462 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-bf0e-account-create-update-csrj2" Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.292404 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=3.6780372999999997 podStartE2EDuration="9.292376185s" podCreationTimestamp="2026-02-16 11:33:03 +0000 UTC" firstStartedPulling="2026-02-16 11:33:05.372051698 +0000 UTC m=+1575.001385863" lastFinishedPulling="2026-02-16 11:33:10.986390583 +0000 UTC m=+1580.615724748" observedRunningTime="2026-02-16 11:33:12.268865412 +0000 UTC m=+1581.898199577" watchObservedRunningTime="2026-02-16 11:33:12.292376185 +0000 UTC m=+1581.921710350" Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.340358 4949 scope.go:117] "RemoveContainer" containerID="ce61347c30c9ad9427f64e0637c643f366bbbfb87c033122fb6afeb78aad6ec5" Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.345783 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=4.286334548 podStartE2EDuration="9.345747861s" podCreationTimestamp="2026-02-16 11:33:03 +0000 UTC" firstStartedPulling="2026-02-16 11:33:05.975671063 +0000 UTC m=+1575.605005228" lastFinishedPulling="2026-02-16 11:33:11.035084376 +0000 UTC m=+1580.664418541" observedRunningTime="2026-02-16 11:33:12.296162993 +0000 UTC m=+1581.925497148" watchObservedRunningTime="2026-02-16 11:33:12.345747861 +0000 UTC m=+1581.975082016" Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.385653 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.406778 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.460680 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Feb 16 11:33:12 crc kubenswrapper[4949]: E0216 11:33:12.462460 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4c12d9b-68b8-4a21-9dfc-d1fae847db6a" containerName="ceilometer-central-agent" Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.462519 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4c12d9b-68b8-4a21-9dfc-d1fae847db6a" containerName="ceilometer-central-agent" Feb 16 11:33:12 crc kubenswrapper[4949]: E0216 11:33:12.462544 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4c12d9b-68b8-4a21-9dfc-d1fae847db6a" containerName="ceilometer-notification-agent" Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 
11:33:12.462552 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4c12d9b-68b8-4a21-9dfc-d1fae847db6a" containerName="ceilometer-notification-agent" Feb 16 11:33:12 crc kubenswrapper[4949]: E0216 11:33:12.462578 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddd1d576-d2cb-4f8b-aa99-9d31cd1d6f50" containerName="mariadb-database-create" Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.462592 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddd1d576-d2cb-4f8b-aa99-9d31cd1d6f50" containerName="mariadb-database-create" Feb 16 11:33:12 crc kubenswrapper[4949]: E0216 11:33:12.462606 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4c12d9b-68b8-4a21-9dfc-d1fae847db6a" containerName="proxy-httpd" Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.462614 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4c12d9b-68b8-4a21-9dfc-d1fae847db6a" containerName="proxy-httpd" Feb 16 11:33:12 crc kubenswrapper[4949]: E0216 11:33:12.462635 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4c12d9b-68b8-4a21-9dfc-d1fae847db6a" containerName="sg-core" Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.462644 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4c12d9b-68b8-4a21-9dfc-d1fae847db6a" containerName="sg-core" Feb 16 11:33:12 crc kubenswrapper[4949]: E0216 11:33:12.462666 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d70eafc1-c4e9-4f70-bb2c-0a4b7e54bef1" containerName="mariadb-account-create-update" Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.462680 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="d70eafc1-c4e9-4f70-bb2c-0a4b7e54bef1" containerName="mariadb-account-create-update" Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.463010 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4c12d9b-68b8-4a21-9dfc-d1fae847db6a" containerName="ceilometer-notification-agent" Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.463037 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4c12d9b-68b8-4a21-9dfc-d1fae847db6a" containerName="proxy-httpd" Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.463066 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="d70eafc1-c4e9-4f70-bb2c-0a4b7e54bef1" containerName="mariadb-account-create-update" Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.463084 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4c12d9b-68b8-4a21-9dfc-d1fae847db6a" containerName="sg-core" Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.463107 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4c12d9b-68b8-4a21-9dfc-d1fae847db6a" containerName="ceilometer-central-agent" Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.463129 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="ddd1d576-d2cb-4f8b-aa99-9d31cd1d6f50" containerName="mariadb-database-create" Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.469191 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.470637 4949 scope.go:117] "RemoveContainer" containerID="e83145291040aaa7412bca26e9ebbc0ef240228c0112d4485ab0b6a8218bdd43" Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.473378 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.473803 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.498477 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.535993 4949 scope.go:117] "RemoveContainer" containerID="f0ab89860541849f86fc65fb7aa4f7e00c2b1cd9b604a4c3bb585e966c63e0cf" Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.544243 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c46769b4-dd47-47eb-ae71-f1cf844b93ba-scripts\") pod \"ceilometer-0\" (UID: \"c46769b4-dd47-47eb-ae71-f1cf844b93ba\") " pod="openstack/ceilometer-0" Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.544323 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7vg5x\" (UniqueName: \"kubernetes.io/projected/c46769b4-dd47-47eb-ae71-f1cf844b93ba-kube-api-access-7vg5x\") pod \"ceilometer-0\" (UID: \"c46769b4-dd47-47eb-ae71-f1cf844b93ba\") " pod="openstack/ceilometer-0" Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.544356 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c46769b4-dd47-47eb-ae71-f1cf844b93ba-run-httpd\") pod \"ceilometer-0\" (UID: \"c46769b4-dd47-47eb-ae71-f1cf844b93ba\") " pod="openstack/ceilometer-0" Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.544410 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c46769b4-dd47-47eb-ae71-f1cf844b93ba-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c46769b4-dd47-47eb-ae71-f1cf844b93ba\") " pod="openstack/ceilometer-0" Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.544441 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c46769b4-dd47-47eb-ae71-f1cf844b93ba-log-httpd\") pod \"ceilometer-0\" (UID: \"c46769b4-dd47-47eb-ae71-f1cf844b93ba\") " pod="openstack/ceilometer-0" Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.544493 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c46769b4-dd47-47eb-ae71-f1cf844b93ba-config-data\") pod \"ceilometer-0\" (UID: \"c46769b4-dd47-47eb-ae71-f1cf844b93ba\") " pod="openstack/ceilometer-0" Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.544514 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c46769b4-dd47-47eb-ae71-f1cf844b93ba-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c46769b4-dd47-47eb-ae71-f1cf844b93ba\") " pod="openstack/ceilometer-0" Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 
11:33:12.647161 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c46769b4-dd47-47eb-ae71-f1cf844b93ba-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c46769b4-dd47-47eb-ae71-f1cf844b93ba\") " pod="openstack/ceilometer-0" Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.647876 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c46769b4-dd47-47eb-ae71-f1cf844b93ba-scripts\") pod \"ceilometer-0\" (UID: \"c46769b4-dd47-47eb-ae71-f1cf844b93ba\") " pod="openstack/ceilometer-0" Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.647944 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7vg5x\" (UniqueName: \"kubernetes.io/projected/c46769b4-dd47-47eb-ae71-f1cf844b93ba-kube-api-access-7vg5x\") pod \"ceilometer-0\" (UID: \"c46769b4-dd47-47eb-ae71-f1cf844b93ba\") " pod="openstack/ceilometer-0" Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.647972 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c46769b4-dd47-47eb-ae71-f1cf844b93ba-run-httpd\") pod \"ceilometer-0\" (UID: \"c46769b4-dd47-47eb-ae71-f1cf844b93ba\") " pod="openstack/ceilometer-0" Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.648023 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c46769b4-dd47-47eb-ae71-f1cf844b93ba-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c46769b4-dd47-47eb-ae71-f1cf844b93ba\") " pod="openstack/ceilometer-0" Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.648053 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c46769b4-dd47-47eb-ae71-f1cf844b93ba-log-httpd\") pod \"ceilometer-0\" (UID: \"c46769b4-dd47-47eb-ae71-f1cf844b93ba\") " pod="openstack/ceilometer-0" Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.648121 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c46769b4-dd47-47eb-ae71-f1cf844b93ba-config-data\") pod \"ceilometer-0\" (UID: \"c46769b4-dd47-47eb-ae71-f1cf844b93ba\") " pod="openstack/ceilometer-0" Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.648839 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c46769b4-dd47-47eb-ae71-f1cf844b93ba-run-httpd\") pod \"ceilometer-0\" (UID: \"c46769b4-dd47-47eb-ae71-f1cf844b93ba\") " pod="openstack/ceilometer-0" Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.649442 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c46769b4-dd47-47eb-ae71-f1cf844b93ba-log-httpd\") pod \"ceilometer-0\" (UID: \"c46769b4-dd47-47eb-ae71-f1cf844b93ba\") " pod="openstack/ceilometer-0" Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.652538 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c46769b4-dd47-47eb-ae71-f1cf844b93ba-scripts\") pod \"ceilometer-0\" (UID: \"c46769b4-dd47-47eb-ae71-f1cf844b93ba\") " pod="openstack/ceilometer-0" Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.653397 4949 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c46769b4-dd47-47eb-ae71-f1cf844b93ba-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c46769b4-dd47-47eb-ae71-f1cf844b93ba\") " pod="openstack/ceilometer-0" Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.656517 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c46769b4-dd47-47eb-ae71-f1cf844b93ba-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c46769b4-dd47-47eb-ae71-f1cf844b93ba\") " pod="openstack/ceilometer-0" Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.657572 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c46769b4-dd47-47eb-ae71-f1cf844b93ba-config-data\") pod \"ceilometer-0\" (UID: \"c46769b4-dd47-47eb-ae71-f1cf844b93ba\") " pod="openstack/ceilometer-0" Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.684970 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7vg5x\" (UniqueName: \"kubernetes.io/projected/c46769b4-dd47-47eb-ae71-f1cf844b93ba-kube-api-access-7vg5x\") pod \"ceilometer-0\" (UID: \"c46769b4-dd47-47eb-ae71-f1cf844b93ba\") " pod="openstack/ceilometer-0" Feb 16 11:33:12 crc kubenswrapper[4949]: I0216 11:33:12.808848 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 16 11:33:13 crc kubenswrapper[4949]: I0216 11:33:13.275960 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a4c12d9b-68b8-4a21-9dfc-d1fae847db6a" path="/var/lib/kubelet/pods/a4c12d9b-68b8-4a21-9dfc-d1fae847db6a/volumes" Feb 16 11:33:13 crc kubenswrapper[4949]: I0216 11:33:13.316980 4949 generic.go:334] "Generic (PLEG): container finished" podID="e4efbbb4-0e96-441e-a838-c5af61da8cd5" containerID="e49cd6158108fb2bf9fc5ccfbf9f70c7c45791adfb015fa8e73ecd6a46d90c62" exitCode=0 Feb 16 11:33:13 crc kubenswrapper[4949]: I0216 11:33:13.317046 4949 generic.go:334] "Generic (PLEG): container finished" podID="e4efbbb4-0e96-441e-a838-c5af61da8cd5" containerID="078d2290d421b47a707eb2c33501a3cc920656883d53e1acb613ff2ad3f07692" exitCode=143 Feb 16 11:33:13 crc kubenswrapper[4949]: I0216 11:33:13.317141 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e4efbbb4-0e96-441e-a838-c5af61da8cd5","Type":"ContainerDied","Data":"e49cd6158108fb2bf9fc5ccfbf9f70c7c45791adfb015fa8e73ecd6a46d90c62"} Feb 16 11:33:13 crc kubenswrapper[4949]: I0216 11:33:13.317198 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e4efbbb4-0e96-441e-a838-c5af61da8cd5","Type":"ContainerDied","Data":"078d2290d421b47a707eb2c33501a3cc920656883d53e1acb613ff2ad3f07692"} Feb 16 11:33:13 crc kubenswrapper[4949]: I0216 11:33:13.349612 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"9e673569-9ea0-48bd-8a23-d3766abebe4d","Type":"ContainerStarted","Data":"a4486b962deabfc98ab8e4fc94066bc75117529e05df77c95d9cd4b22fbc7396"} Feb 16 11:33:13 crc kubenswrapper[4949]: I0216 11:33:13.408851 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=4.572542854 podStartE2EDuration="10.408825026s" podCreationTimestamp="2026-02-16 11:33:03 +0000 UTC" firstStartedPulling="2026-02-16 11:33:05.198720881 +0000 UTC m=+1574.828055046" lastFinishedPulling="2026-02-16 11:33:11.035003053 +0000 UTC 
m=+1580.664337218" observedRunningTime="2026-02-16 11:33:13.400096886 +0000 UTC m=+1583.029431041" watchObservedRunningTime="2026-02-16 11:33:13.408825026 +0000 UTC m=+1583.038159191" Feb 16 11:33:13 crc kubenswrapper[4949]: I0216 11:33:13.631464 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Feb 16 11:33:13 crc kubenswrapper[4949]: I0216 11:33:13.631511 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Feb 16 11:33:13 crc kubenswrapper[4949]: I0216 11:33:13.691314 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Feb 16 11:33:13 crc kubenswrapper[4949]: I0216 11:33:13.691564 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Feb 16 11:33:13 crc kubenswrapper[4949]: I0216 11:33:13.749570 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 16 11:33:13 crc kubenswrapper[4949]: W0216 11:33:13.752404 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc46769b4_dd47_47eb_ae71_f1cf844b93ba.slice/crio-2e3ef0fabd38027d63a8034b703011ce906b563f0c4d3402a8d292a2585b057f WatchSource:0}: Error finding container 2e3ef0fabd38027d63a8034b703011ce906b563f0c4d3402a8d292a2585b057f: Status 404 returned error can't find the container with id 2e3ef0fabd38027d63a8034b703011ce906b563f0c4d3402a8d292a2585b057f Feb 16 11:33:13 crc kubenswrapper[4949]: I0216 11:33:13.764667 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Feb 16 11:33:13 crc kubenswrapper[4949]: I0216 11:33:13.848931 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Feb 16 11:33:13 crc kubenswrapper[4949]: I0216 11:33:13.901469 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4efbbb4-0e96-441e-a838-c5af61da8cd5-combined-ca-bundle\") pod \"e4efbbb4-0e96-441e-a838-c5af61da8cd5\" (UID: \"e4efbbb4-0e96-441e-a838-c5af61da8cd5\") " Feb 16 11:33:13 crc kubenswrapper[4949]: I0216 11:33:13.901539 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dn4mr\" (UniqueName: \"kubernetes.io/projected/e4efbbb4-0e96-441e-a838-c5af61da8cd5-kube-api-access-dn4mr\") pod \"e4efbbb4-0e96-441e-a838-c5af61da8cd5\" (UID: \"e4efbbb4-0e96-441e-a838-c5af61da8cd5\") " Feb 16 11:33:13 crc kubenswrapper[4949]: I0216 11:33:13.901791 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e4efbbb4-0e96-441e-a838-c5af61da8cd5-logs\") pod \"e4efbbb4-0e96-441e-a838-c5af61da8cd5\" (UID: \"e4efbbb4-0e96-441e-a838-c5af61da8cd5\") " Feb 16 11:33:13 crc kubenswrapper[4949]: I0216 11:33:13.901831 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4efbbb4-0e96-441e-a838-c5af61da8cd5-config-data\") pod \"e4efbbb4-0e96-441e-a838-c5af61da8cd5\" (UID: \"e4efbbb4-0e96-441e-a838-c5af61da8cd5\") " Feb 16 11:33:13 crc kubenswrapper[4949]: I0216 11:33:13.905053 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e4efbbb4-0e96-441e-a838-c5af61da8cd5-logs" (OuterVolumeSpecName: "logs") pod "e4efbbb4-0e96-441e-a838-c5af61da8cd5" (UID: "e4efbbb4-0e96-441e-a838-c5af61da8cd5"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:33:13 crc kubenswrapper[4949]: I0216 11:33:13.937512 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e4efbbb4-0e96-441e-a838-c5af61da8cd5-kube-api-access-dn4mr" (OuterVolumeSpecName: "kube-api-access-dn4mr") pod "e4efbbb4-0e96-441e-a838-c5af61da8cd5" (UID: "e4efbbb4-0e96-441e-a838-c5af61da8cd5"). InnerVolumeSpecName "kube-api-access-dn4mr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:33:13 crc kubenswrapper[4949]: I0216 11:33:13.963489 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-sync-8tvgc"] Feb 16 11:33:13 crc kubenswrapper[4949]: E0216 11:33:13.964321 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4efbbb4-0e96-441e-a838-c5af61da8cd5" containerName="nova-metadata-log" Feb 16 11:33:13 crc kubenswrapper[4949]: I0216 11:33:13.964337 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4efbbb4-0e96-441e-a838-c5af61da8cd5" containerName="nova-metadata-log" Feb 16 11:33:13 crc kubenswrapper[4949]: E0216 11:33:13.964348 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4efbbb4-0e96-441e-a838-c5af61da8cd5" containerName="nova-metadata-metadata" Feb 16 11:33:13 crc kubenswrapper[4949]: I0216 11:33:13.964354 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4efbbb4-0e96-441e-a838-c5af61da8cd5" containerName="nova-metadata-metadata" Feb 16 11:33:13 crc kubenswrapper[4949]: I0216 11:33:13.964618 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="e4efbbb4-0e96-441e-a838-c5af61da8cd5" containerName="nova-metadata-log" Feb 16 11:33:13 crc kubenswrapper[4949]: I0216 11:33:13.964651 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="e4efbbb4-0e96-441e-a838-c5af61da8cd5" containerName="nova-metadata-metadata" Feb 16 11:33:13 crc kubenswrapper[4949]: I0216 11:33:13.965695 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-8tvgc" Feb 16 11:33:13 crc kubenswrapper[4949]: I0216 11:33:13.972597 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e4efbbb4-0e96-441e-a838-c5af61da8cd5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e4efbbb4-0e96-441e-a838-c5af61da8cd5" (UID: "e4efbbb4-0e96-441e-a838-c5af61da8cd5"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:33:13 crc kubenswrapper[4949]: I0216 11:33:13.975595 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Feb 16 11:33:13 crc kubenswrapper[4949]: I0216 11:33:13.976067 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-f89fs" Feb 16 11:33:13 crc kubenswrapper[4949]: I0216 11:33:13.977506 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-8tvgc"] Feb 16 11:33:13 crc kubenswrapper[4949]: I0216 11:33:13.977812 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Feb 16 11:33:13 crc kubenswrapper[4949]: I0216 11:33:13.979569 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.014019 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/167f9139-7451-4312-9b89-ebff291c748a-config-data\") pod \"aodh-db-sync-8tvgc\" (UID: \"167f9139-7451-4312-9b89-ebff291c748a\") " pod="openstack/aodh-db-sync-8tvgc" Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.014205 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-662np\" (UniqueName: \"kubernetes.io/projected/167f9139-7451-4312-9b89-ebff291c748a-kube-api-access-662np\") pod \"aodh-db-sync-8tvgc\" (UID: \"167f9139-7451-4312-9b89-ebff291c748a\") " pod="openstack/aodh-db-sync-8tvgc" Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.014304 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/167f9139-7451-4312-9b89-ebff291c748a-scripts\") pod \"aodh-db-sync-8tvgc\" (UID: \"167f9139-7451-4312-9b89-ebff291c748a\") " pod="openstack/aodh-db-sync-8tvgc" Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.014361 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/167f9139-7451-4312-9b89-ebff291c748a-combined-ca-bundle\") pod \"aodh-db-sync-8tvgc\" (UID: \"167f9139-7451-4312-9b89-ebff291c748a\") " pod="openstack/aodh-db-sync-8tvgc" Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.016145 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dn4mr\" (UniqueName: \"kubernetes.io/projected/e4efbbb4-0e96-441e-a838-c5af61da8cd5-kube-api-access-dn4mr\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.016188 4949 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e4efbbb4-0e96-441e-a838-c5af61da8cd5-logs\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.016204 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4efbbb4-0e96-441e-a838-c5af61da8cd5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.098039 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.100975 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/e4efbbb4-0e96-441e-a838-c5af61da8cd5-config-data" (OuterVolumeSpecName: "config-data") pod "e4efbbb4-0e96-441e-a838-c5af61da8cd5" (UID: "e4efbbb4-0e96-441e-a838-c5af61da8cd5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.119570 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/167f9139-7451-4312-9b89-ebff291c748a-config-data\") pod \"aodh-db-sync-8tvgc\" (UID: \"167f9139-7451-4312-9b89-ebff291c748a\") " pod="openstack/aodh-db-sync-8tvgc" Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.119644 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-662np\" (UniqueName: \"kubernetes.io/projected/167f9139-7451-4312-9b89-ebff291c748a-kube-api-access-662np\") pod \"aodh-db-sync-8tvgc\" (UID: \"167f9139-7451-4312-9b89-ebff291c748a\") " pod="openstack/aodh-db-sync-8tvgc" Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.119699 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/167f9139-7451-4312-9b89-ebff291c748a-scripts\") pod \"aodh-db-sync-8tvgc\" (UID: \"167f9139-7451-4312-9b89-ebff291c748a\") " pod="openstack/aodh-db-sync-8tvgc" Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.119736 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/167f9139-7451-4312-9b89-ebff291c748a-combined-ca-bundle\") pod \"aodh-db-sync-8tvgc\" (UID: \"167f9139-7451-4312-9b89-ebff291c748a\") " pod="openstack/aodh-db-sync-8tvgc" Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.119907 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4efbbb4-0e96-441e-a838-c5af61da8cd5-config-data\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.125486 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/167f9139-7451-4312-9b89-ebff291c748a-scripts\") pod \"aodh-db-sync-8tvgc\" (UID: \"167f9139-7451-4312-9b89-ebff291c748a\") " pod="openstack/aodh-db-sync-8tvgc" Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.126656 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/167f9139-7451-4312-9b89-ebff291c748a-config-data\") pod \"aodh-db-sync-8tvgc\" (UID: \"167f9139-7451-4312-9b89-ebff291c748a\") " pod="openstack/aodh-db-sync-8tvgc" Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.128718 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/167f9139-7451-4312-9b89-ebff291c748a-combined-ca-bundle\") pod \"aodh-db-sync-8tvgc\" (UID: \"167f9139-7451-4312-9b89-ebff291c748a\") " pod="openstack/aodh-db-sync-8tvgc" Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.146552 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-662np\" (UniqueName: \"kubernetes.io/projected/167f9139-7451-4312-9b89-ebff291c748a-kube-api-access-662np\") pod \"aodh-db-sync-8tvgc\" (UID: \"167f9139-7451-4312-9b89-ebff291c748a\") " pod="openstack/aodh-db-sync-8tvgc" Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.312472 4949 util.go:30] "No sandbox for pod 
can be found. Need to start a new one" pod="openstack/aodh-db-sync-8tvgc" Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.390512 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e4efbbb4-0e96-441e-a838-c5af61da8cd5","Type":"ContainerDied","Data":"ed74b4eacbeb9a612e51391670577d9f8a9c75ff2efa1b49b480cfff70349c06"} Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.390809 4949 scope.go:117] "RemoveContainer" containerID="e49cd6158108fb2bf9fc5ccfbf9f70c7c45791adfb015fa8e73ecd6a46d90c62" Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.390937 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.398743 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c46769b4-dd47-47eb-ae71-f1cf844b93ba","Type":"ContainerStarted","Data":"2e3ef0fabd38027d63a8034b703011ce906b563f0c4d3402a8d292a2585b057f"} Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.443399 4949 scope.go:117] "RemoveContainer" containerID="078d2290d421b47a707eb2c33501a3cc920656883d53e1acb613ff2ad3f07692" Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.461213 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.470819 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5fbc4d444f-4mz4p" Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.482647 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.510500 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.513871 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.519460 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.519839 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.520146 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.537935 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.594252 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f6bc4c6c9-zglfk"] Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.594650 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-f6bc4c6c9-zglfk" podUID="16d14031-a4a4-4965-a1fc-b385b05df235" containerName="dnsmasq-dns" containerID="cri-o://b0afa1d77ba60e4f91a6fd07801be2b1c3c3d04fe8ba25a009d59fca5dcf3dd5" gracePeriod=10 Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.663419 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-f6bc4c6c9-zglfk" podUID="16d14031-a4a4-4965-a1fc-b385b05df235" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.219:5353: connect: connection refused" Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.684117 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf-logs\") pod \"nova-metadata-0\" (UID: \"8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf\") " pod="openstack/nova-metadata-0" Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.684447 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf\") " pod="openstack/nova-metadata-0" Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.685034 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf\") " pod="openstack/nova-metadata-0" Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.685107 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf-config-data\") pod \"nova-metadata-0\" (UID: \"8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf\") " pod="openstack/nova-metadata-0" Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.685362 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pfp55\" (UniqueName: \"kubernetes.io/projected/8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf-kube-api-access-pfp55\") pod \"nova-metadata-0\" (UID: \"8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf\") " pod="openstack/nova-metadata-0" Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.702609 4949 
prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="9e673569-9ea0-48bd-8a23-d3766abebe4d" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.240:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.745200 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="9e673569-9ea0-48bd-8a23-d3766abebe4d" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.240:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.787969 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf\") " pod="openstack/nova-metadata-0" Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.788087 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf\") " pod="openstack/nova-metadata-0" Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.788113 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf-config-data\") pod \"nova-metadata-0\" (UID: \"8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf\") " pod="openstack/nova-metadata-0" Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.788228 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pfp55\" (UniqueName: \"kubernetes.io/projected/8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf-kube-api-access-pfp55\") pod \"nova-metadata-0\" (UID: \"8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf\") " pod="openstack/nova-metadata-0" Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.788263 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf-logs\") pod \"nova-metadata-0\" (UID: \"8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf\") " pod="openstack/nova-metadata-0" Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.788751 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf-logs\") pod \"nova-metadata-0\" (UID: \"8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf\") " pod="openstack/nova-metadata-0" Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.798348 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf-config-data\") pod \"nova-metadata-0\" (UID: \"8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf\") " pod="openstack/nova-metadata-0" Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.812605 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pfp55\" (UniqueName: \"kubernetes.io/projected/8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf-kube-api-access-pfp55\") pod \"nova-metadata-0\" (UID: \"8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf\") " pod="openstack/nova-metadata-0" Feb 16 11:33:14 crc 
kubenswrapper[4949]: I0216 11:33:14.821984 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf\") " pod="openstack/nova-metadata-0" Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.827519 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf\") " pod="openstack/nova-metadata-0" Feb 16 11:33:14 crc kubenswrapper[4949]: I0216 11:33:14.859527 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Feb 16 11:33:15 crc kubenswrapper[4949]: I0216 11:33:15.281846 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e4efbbb4-0e96-441e-a838-c5af61da8cd5" path="/var/lib/kubelet/pods/e4efbbb4-0e96-441e-a838-c5af61da8cd5/volumes" Feb 16 11:33:15 crc kubenswrapper[4949]: I0216 11:33:15.453212 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-8tvgc"] Feb 16 11:33:15 crc kubenswrapper[4949]: I0216 11:33:15.540782 4949 generic.go:334] "Generic (PLEG): container finished" podID="16d14031-a4a4-4965-a1fc-b385b05df235" containerID="b0afa1d77ba60e4f91a6fd07801be2b1c3c3d04fe8ba25a009d59fca5dcf3dd5" exitCode=0 Feb 16 11:33:15 crc kubenswrapper[4949]: I0216 11:33:15.540847 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f6bc4c6c9-zglfk" event={"ID":"16d14031-a4a4-4965-a1fc-b385b05df235","Type":"ContainerDied","Data":"b0afa1d77ba60e4f91a6fd07801be2b1c3c3d04fe8ba25a009d59fca5dcf3dd5"} Feb 16 11:33:15 crc kubenswrapper[4949]: I0216 11:33:15.578444 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c46769b4-dd47-47eb-ae71-f1cf844b93ba","Type":"ContainerStarted","Data":"741ff1e69c385f24184ef7c0fbef1432fecdac23f2944962b210aa063c401a5d"} Feb 16 11:33:15 crc kubenswrapper[4949]: I0216 11:33:15.598461 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-f6bc4c6c9-zglfk" Feb 16 11:33:15 crc kubenswrapper[4949]: I0216 11:33:15.627380 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/16d14031-a4a4-4965-a1fc-b385b05df235-dns-svc\") pod \"16d14031-a4a4-4965-a1fc-b385b05df235\" (UID: \"16d14031-a4a4-4965-a1fc-b385b05df235\") " Feb 16 11:33:15 crc kubenswrapper[4949]: I0216 11:33:15.627488 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/16d14031-a4a4-4965-a1fc-b385b05df235-dns-swift-storage-0\") pod \"16d14031-a4a4-4965-a1fc-b385b05df235\" (UID: \"16d14031-a4a4-4965-a1fc-b385b05df235\") " Feb 16 11:33:15 crc kubenswrapper[4949]: I0216 11:33:15.627562 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/16d14031-a4a4-4965-a1fc-b385b05df235-ovsdbserver-sb\") pod \"16d14031-a4a4-4965-a1fc-b385b05df235\" (UID: \"16d14031-a4a4-4965-a1fc-b385b05df235\") " Feb 16 11:33:15 crc kubenswrapper[4949]: I0216 11:33:15.627696 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/16d14031-a4a4-4965-a1fc-b385b05df235-ovsdbserver-nb\") pod \"16d14031-a4a4-4965-a1fc-b385b05df235\" (UID: \"16d14031-a4a4-4965-a1fc-b385b05df235\") " Feb 16 11:33:15 crc kubenswrapper[4949]: I0216 11:33:15.627727 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/16d14031-a4a4-4965-a1fc-b385b05df235-config\") pod \"16d14031-a4a4-4965-a1fc-b385b05df235\" (UID: \"16d14031-a4a4-4965-a1fc-b385b05df235\") " Feb 16 11:33:15 crc kubenswrapper[4949]: I0216 11:33:15.627955 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xn7kx\" (UniqueName: \"kubernetes.io/projected/16d14031-a4a4-4965-a1fc-b385b05df235-kube-api-access-xn7kx\") pod \"16d14031-a4a4-4965-a1fc-b385b05df235\" (UID: \"16d14031-a4a4-4965-a1fc-b385b05df235\") " Feb 16 11:33:15 crc kubenswrapper[4949]: I0216 11:33:15.638425 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/16d14031-a4a4-4965-a1fc-b385b05df235-kube-api-access-xn7kx" (OuterVolumeSpecName: "kube-api-access-xn7kx") pod "16d14031-a4a4-4965-a1fc-b385b05df235" (UID: "16d14031-a4a4-4965-a1fc-b385b05df235"). InnerVolumeSpecName "kube-api-access-xn7kx". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:33:15 crc kubenswrapper[4949]: I0216 11:33:15.733013 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xn7kx\" (UniqueName: \"kubernetes.io/projected/16d14031-a4a4-4965-a1fc-b385b05df235-kube-api-access-xn7kx\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:15 crc kubenswrapper[4949]: I0216 11:33:15.865071 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/16d14031-a4a4-4965-a1fc-b385b05df235-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "16d14031-a4a4-4965-a1fc-b385b05df235" (UID: "16d14031-a4a4-4965-a1fc-b385b05df235"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:33:15 crc kubenswrapper[4949]: I0216 11:33:15.889136 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/16d14031-a4a4-4965-a1fc-b385b05df235-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "16d14031-a4a4-4965-a1fc-b385b05df235" (UID: "16d14031-a4a4-4965-a1fc-b385b05df235"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:33:15 crc kubenswrapper[4949]: I0216 11:33:15.891612 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/16d14031-a4a4-4965-a1fc-b385b05df235-config" (OuterVolumeSpecName: "config") pod "16d14031-a4a4-4965-a1fc-b385b05df235" (UID: "16d14031-a4a4-4965-a1fc-b385b05df235"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:33:15 crc kubenswrapper[4949]: I0216 11:33:15.900511 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/16d14031-a4a4-4965-a1fc-b385b05df235-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "16d14031-a4a4-4965-a1fc-b385b05df235" (UID: "16d14031-a4a4-4965-a1fc-b385b05df235"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:33:15 crc kubenswrapper[4949]: I0216 11:33:15.907530 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/16d14031-a4a4-4965-a1fc-b385b05df235-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "16d14031-a4a4-4965-a1fc-b385b05df235" (UID: "16d14031-a4a4-4965-a1fc-b385b05df235"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:33:15 crc kubenswrapper[4949]: I0216 11:33:15.941043 4949 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/16d14031-a4a4-4965-a1fc-b385b05df235-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:15 crc kubenswrapper[4949]: I0216 11:33:15.941095 4949 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/16d14031-a4a4-4965-a1fc-b385b05df235-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:15 crc kubenswrapper[4949]: I0216 11:33:15.941112 4949 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/16d14031-a4a4-4965-a1fc-b385b05df235-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:15 crc kubenswrapper[4949]: I0216 11:33:15.941126 4949 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/16d14031-a4a4-4965-a1fc-b385b05df235-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:15 crc kubenswrapper[4949]: I0216 11:33:15.941139 4949 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/16d14031-a4a4-4965-a1fc-b385b05df235-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:15 crc kubenswrapper[4949]: I0216 11:33:15.986488 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Feb 16 11:33:16 crc kubenswrapper[4949]: W0216 11:33:16.055764 4949 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8e26e7dd_1ef4_4d9a_a8ed_362c36c8edbf.slice/crio-079855774b3f77b0f372f09c237f58d21c45b2a4bfcf7388fd12d5f307d375e4 WatchSource:0}: Error finding container 079855774b3f77b0f372f09c237f58d21c45b2a4bfcf7388fd12d5f307d375e4: Status 404 returned error can't find the container with id 079855774b3f77b0f372f09c237f58d21c45b2a4bfcf7388fd12d5f307d375e4 Feb 16 11:33:16 crc kubenswrapper[4949]: I0216 11:33:16.661507 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-8tvgc" event={"ID":"167f9139-7451-4312-9b89-ebff291c748a","Type":"ContainerStarted","Data":"1f243778a8fb2bbe950245bf45baa206096acd1ec523ab86c332a612b4b9502f"} Feb 16 11:33:16 crc kubenswrapper[4949]: I0216 11:33:16.677305 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f6bc4c6c9-zglfk" event={"ID":"16d14031-a4a4-4965-a1fc-b385b05df235","Type":"ContainerDied","Data":"3f9c414e86e6fee300c25143da51b40d71575a509327b4307ec421fb1489891d"} Feb 16 11:33:16 crc kubenswrapper[4949]: I0216 11:33:16.677379 4949 scope.go:117] "RemoveContainer" containerID="b0afa1d77ba60e4f91a6fd07801be2b1c3c3d04fe8ba25a009d59fca5dcf3dd5" Feb 16 11:33:16 crc kubenswrapper[4949]: I0216 11:33:16.677605 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f6bc4c6c9-zglfk" Feb 16 11:33:16 crc kubenswrapper[4949]: I0216 11:33:16.731087 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf","Type":"ContainerStarted","Data":"1b263914b8c93744d4917ebfb3919393a870e44cd516d21490c1f66bfbcaf391"} Feb 16 11:33:16 crc kubenswrapper[4949]: I0216 11:33:16.731151 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf","Type":"ContainerStarted","Data":"079855774b3f77b0f372f09c237f58d21c45b2a4bfcf7388fd12d5f307d375e4"} Feb 16 11:33:16 crc kubenswrapper[4949]: I0216 11:33:16.759738 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c46769b4-dd47-47eb-ae71-f1cf844b93ba","Type":"ContainerStarted","Data":"3c8af2a289171b3fda9efa6c088788edc11ee2bea583ea311b685232d173cda1"} Feb 16 11:33:16 crc kubenswrapper[4949]: I0216 11:33:16.768432 4949 scope.go:117] "RemoveContainer" containerID="699c9d2b2b02f2f410da90de2db4fc5edcb2890d9288751a6f41863f330e7dc8" Feb 16 11:33:16 crc kubenswrapper[4949]: I0216 11:33:16.771029 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f6bc4c6c9-zglfk"] Feb 16 11:33:16 crc kubenswrapper[4949]: I0216 11:33:16.787618 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-f6bc4c6c9-zglfk"] Feb 16 11:33:17 crc kubenswrapper[4949]: I0216 11:33:17.253703 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="16d14031-a4a4-4965-a1fc-b385b05df235" path="/var/lib/kubelet/pods/16d14031-a4a4-4965-a1fc-b385b05df235/volumes" Feb 16 11:33:17 crc kubenswrapper[4949]: I0216 11:33:17.781474 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf","Type":"ContainerStarted","Data":"80d715caa1f09bc70781d98e854c03a3e9608cd2e2d42fc2bf295a80afbddaec"} Feb 16 11:33:17 crc kubenswrapper[4949]: I0216 11:33:17.789326 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"c46769b4-dd47-47eb-ae71-f1cf844b93ba","Type":"ContainerStarted","Data":"6995c4611db96d4813f2fc4c4286930c303c3682bb7ee129cef0a6adc7fa4b10"} Feb 16 11:33:17 crc kubenswrapper[4949]: I0216 11:33:17.817752 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.817730544 podStartE2EDuration="3.817730544s" podCreationTimestamp="2026-02-16 11:33:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:33:17.816260242 +0000 UTC m=+1587.445594407" watchObservedRunningTime="2026-02-16 11:33:17.817730544 +0000 UTC m=+1587.447064709" Feb 16 11:33:19 crc kubenswrapper[4949]: I0216 11:33:19.860027 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Feb 16 11:33:19 crc kubenswrapper[4949]: I0216 11:33:19.860833 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Feb 16 11:33:20 crc kubenswrapper[4949]: I0216 11:33:20.028818 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c46769b4-dd47-47eb-ae71-f1cf844b93ba","Type":"ContainerStarted","Data":"11026931ad6332ebe225a5e7f86e5df0ffa9c24b6565abb0b590e31028e54547"} Feb 16 11:33:20 crc kubenswrapper[4949]: I0216 11:33:20.029363 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Feb 16 11:33:20 crc kubenswrapper[4949]: I0216 11:33:20.031970 4949 generic.go:334] "Generic (PLEG): container finished" podID="6a1e0048-b760-4b8c-a65b-7f0224833721" containerID="6b827f03206bb4c4e85fc8cee05019ee2e5acc8d8bbdbfe9b7315957449e82e1" exitCode=0 Feb 16 11:33:20 crc kubenswrapper[4949]: I0216 11:33:20.032067 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-j9tzp" event={"ID":"6a1e0048-b760-4b8c-a65b-7f0224833721","Type":"ContainerDied","Data":"6b827f03206bb4c4e85fc8cee05019ee2e5acc8d8bbdbfe9b7315957449e82e1"} Feb 16 11:33:20 crc kubenswrapper[4949]: I0216 11:33:20.065657 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.664366256 podStartE2EDuration="8.065633475s" podCreationTimestamp="2026-02-16 11:33:12 +0000 UTC" firstStartedPulling="2026-02-16 11:33:13.764641633 +0000 UTC m=+1583.393975798" lastFinishedPulling="2026-02-16 11:33:19.165908852 +0000 UTC m=+1588.795243017" observedRunningTime="2026-02-16 11:33:20.049455212 +0000 UTC m=+1589.678789377" watchObservedRunningTime="2026-02-16 11:33:20.065633475 +0000 UTC m=+1589.694967640" Feb 16 11:33:20 crc kubenswrapper[4949]: E0216 11:33:20.892710 4949 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod91104758_7e02_4761_bc39_fbca029cda0f.slice\": RecentStats: unable to find data in memory cache]" Feb 16 11:33:21 crc kubenswrapper[4949]: I0216 11:33:21.085999 4949 generic.go:334] "Generic (PLEG): container finished" podID="a0bd872f-2560-48de-997e-a2ce07f4f0b7" containerID="ba8ab6c66a20430c703e5c48bd3835c082ca95f7daaae276285a64f3466720d9" exitCode=0 Feb 16 11:33:21 crc kubenswrapper[4949]: I0216 11:33:21.086186 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-hm2m6" 
event={"ID":"a0bd872f-2560-48de-997e-a2ce07f4f0b7","Type":"ContainerDied","Data":"ba8ab6c66a20430c703e5c48bd3835c082ca95f7daaae276285a64f3466720d9"} Feb 16 11:33:21 crc kubenswrapper[4949]: E0216 11:33:21.115858 4949 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod91104758_7e02_4761_bc39_fbca029cda0f.slice\": RecentStats: unable to find data in memory cache]" Feb 16 11:33:23 crc kubenswrapper[4949]: I0216 11:33:23.634646 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Feb 16 11:33:23 crc kubenswrapper[4949]: I0216 11:33:23.635988 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Feb 16 11:33:23 crc kubenswrapper[4949]: I0216 11:33:23.651757 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Feb 16 11:33:23 crc kubenswrapper[4949]: I0216 11:33:23.660663 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Feb 16 11:33:23 crc kubenswrapper[4949]: I0216 11:33:23.678071 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-j9tzp" Feb 16 11:33:23 crc kubenswrapper[4949]: I0216 11:33:23.697287 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-hm2m6" Feb 16 11:33:23 crc kubenswrapper[4949]: I0216 11:33:23.816117 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a1e0048-b760-4b8c-a65b-7f0224833721-combined-ca-bundle\") pod \"6a1e0048-b760-4b8c-a65b-7f0224833721\" (UID: \"6a1e0048-b760-4b8c-a65b-7f0224833721\") " Feb 16 11:33:23 crc kubenswrapper[4949]: I0216 11:33:23.816282 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7bjvz\" (UniqueName: \"kubernetes.io/projected/a0bd872f-2560-48de-997e-a2ce07f4f0b7-kube-api-access-7bjvz\") pod \"a0bd872f-2560-48de-997e-a2ce07f4f0b7\" (UID: \"a0bd872f-2560-48de-997e-a2ce07f4f0b7\") " Feb 16 11:33:23 crc kubenswrapper[4949]: I0216 11:33:23.816500 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0bd872f-2560-48de-997e-a2ce07f4f0b7-combined-ca-bundle\") pod \"a0bd872f-2560-48de-997e-a2ce07f4f0b7\" (UID: \"a0bd872f-2560-48de-997e-a2ce07f4f0b7\") " Feb 16 11:33:23 crc kubenswrapper[4949]: I0216 11:33:23.816526 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a0bd872f-2560-48de-997e-a2ce07f4f0b7-scripts\") pod \"a0bd872f-2560-48de-997e-a2ce07f4f0b7\" (UID: \"a0bd872f-2560-48de-997e-a2ce07f4f0b7\") " Feb 16 11:33:23 crc kubenswrapper[4949]: I0216 11:33:23.816553 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6a1e0048-b760-4b8c-a65b-7f0224833721-config-data\") pod \"6a1e0048-b760-4b8c-a65b-7f0224833721\" (UID: \"6a1e0048-b760-4b8c-a65b-7f0224833721\") " Feb 16 11:33:23 crc kubenswrapper[4949]: I0216 11:33:23.817215 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6a1e0048-b760-4b8c-a65b-7f0224833721-scripts\") pod 
\"6a1e0048-b760-4b8c-a65b-7f0224833721\" (UID: \"6a1e0048-b760-4b8c-a65b-7f0224833721\") " Feb 16 11:33:23 crc kubenswrapper[4949]: I0216 11:33:23.817321 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0bd872f-2560-48de-997e-a2ce07f4f0b7-config-data\") pod \"a0bd872f-2560-48de-997e-a2ce07f4f0b7\" (UID: \"a0bd872f-2560-48de-997e-a2ce07f4f0b7\") " Feb 16 11:33:23 crc kubenswrapper[4949]: I0216 11:33:23.817352 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n4cnh\" (UniqueName: \"kubernetes.io/projected/6a1e0048-b760-4b8c-a65b-7f0224833721-kube-api-access-n4cnh\") pod \"6a1e0048-b760-4b8c-a65b-7f0224833721\" (UID: \"6a1e0048-b760-4b8c-a65b-7f0224833721\") " Feb 16 11:33:23 crc kubenswrapper[4949]: I0216 11:33:23.824340 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0bd872f-2560-48de-997e-a2ce07f4f0b7-scripts" (OuterVolumeSpecName: "scripts") pod "a0bd872f-2560-48de-997e-a2ce07f4f0b7" (UID: "a0bd872f-2560-48de-997e-a2ce07f4f0b7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:33:23 crc kubenswrapper[4949]: I0216 11:33:23.824351 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a1e0048-b760-4b8c-a65b-7f0224833721-scripts" (OuterVolumeSpecName: "scripts") pod "6a1e0048-b760-4b8c-a65b-7f0224833721" (UID: "6a1e0048-b760-4b8c-a65b-7f0224833721"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:33:23 crc kubenswrapper[4949]: I0216 11:33:23.824781 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6a1e0048-b760-4b8c-a65b-7f0224833721-kube-api-access-n4cnh" (OuterVolumeSpecName: "kube-api-access-n4cnh") pod "6a1e0048-b760-4b8c-a65b-7f0224833721" (UID: "6a1e0048-b760-4b8c-a65b-7f0224833721"). InnerVolumeSpecName "kube-api-access-n4cnh". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:33:23 crc kubenswrapper[4949]: I0216 11:33:23.833258 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0bd872f-2560-48de-997e-a2ce07f4f0b7-kube-api-access-7bjvz" (OuterVolumeSpecName: "kube-api-access-7bjvz") pod "a0bd872f-2560-48de-997e-a2ce07f4f0b7" (UID: "a0bd872f-2560-48de-997e-a2ce07f4f0b7"). InnerVolumeSpecName "kube-api-access-7bjvz". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:33:23 crc kubenswrapper[4949]: I0216 11:33:23.871808 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0bd872f-2560-48de-997e-a2ce07f4f0b7-config-data" (OuterVolumeSpecName: "config-data") pod "a0bd872f-2560-48de-997e-a2ce07f4f0b7" (UID: "a0bd872f-2560-48de-997e-a2ce07f4f0b7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:33:23 crc kubenswrapper[4949]: I0216 11:33:23.878281 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0bd872f-2560-48de-997e-a2ce07f4f0b7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a0bd872f-2560-48de-997e-a2ce07f4f0b7" (UID: "a0bd872f-2560-48de-997e-a2ce07f4f0b7"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:33:23 crc kubenswrapper[4949]: I0216 11:33:23.880105 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a1e0048-b760-4b8c-a65b-7f0224833721-config-data" (OuterVolumeSpecName: "config-data") pod "6a1e0048-b760-4b8c-a65b-7f0224833721" (UID: "6a1e0048-b760-4b8c-a65b-7f0224833721"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:33:23 crc kubenswrapper[4949]: I0216 11:33:23.882279 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a1e0048-b760-4b8c-a65b-7f0224833721-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6a1e0048-b760-4b8c-a65b-7f0224833721" (UID: "6a1e0048-b760-4b8c-a65b-7f0224833721"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:33:23 crc kubenswrapper[4949]: I0216 11:33:23.924351 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6a1e0048-b760-4b8c-a65b-7f0224833721-config-data\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:23 crc kubenswrapper[4949]: I0216 11:33:23.924570 4949 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6a1e0048-b760-4b8c-a65b-7f0224833721-scripts\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:23 crc kubenswrapper[4949]: I0216 11:33:23.924593 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0bd872f-2560-48de-997e-a2ce07f4f0b7-config-data\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:23 crc kubenswrapper[4949]: I0216 11:33:23.924604 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n4cnh\" (UniqueName: \"kubernetes.io/projected/6a1e0048-b760-4b8c-a65b-7f0224833721-kube-api-access-n4cnh\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:23 crc kubenswrapper[4949]: I0216 11:33:23.924681 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a1e0048-b760-4b8c-a65b-7f0224833721-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:23 crc kubenswrapper[4949]: I0216 11:33:23.924691 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7bjvz\" (UniqueName: \"kubernetes.io/projected/a0bd872f-2560-48de-997e-a2ce07f4f0b7-kube-api-access-7bjvz\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:23 crc kubenswrapper[4949]: I0216 11:33:23.924720 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0bd872f-2560-48de-997e-a2ce07f4f0b7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:23 crc kubenswrapper[4949]: I0216 11:33:23.924729 4949 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a0bd872f-2560-48de-997e-a2ce07f4f0b7-scripts\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:24 crc kubenswrapper[4949]: I0216 11:33:24.124768 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-j9tzp" Feb 16 11:33:24 crc kubenswrapper[4949]: I0216 11:33:24.127831 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-j9tzp" event={"ID":"6a1e0048-b760-4b8c-a65b-7f0224833721","Type":"ContainerDied","Data":"85ad27306e3b0384d42221535e9e542ee3755efe9269b6003bae3367a0e19a5a"} Feb 16 11:33:24 crc kubenswrapper[4949]: I0216 11:33:24.127892 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="85ad27306e3b0384d42221535e9e542ee3755efe9269b6003bae3367a0e19a5a" Feb 16 11:33:24 crc kubenswrapper[4949]: I0216 11:33:24.130878 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-hm2m6" event={"ID":"a0bd872f-2560-48de-997e-a2ce07f4f0b7","Type":"ContainerDied","Data":"6712ca4aa4e9379cb141283c0a3bb82f50c93ab1821dc32a911f543f3ab9d0a7"} Feb 16 11:33:24 crc kubenswrapper[4949]: I0216 11:33:24.130918 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6712ca4aa4e9379cb141283c0a3bb82f50c93ab1821dc32a911f543f3ab9d0a7" Feb 16 11:33:24 crc kubenswrapper[4949]: I0216 11:33:24.130976 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-hm2m6" Feb 16 11:33:24 crc kubenswrapper[4949]: I0216 11:33:24.133281 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Feb 16 11:33:24 crc kubenswrapper[4949]: I0216 11:33:24.143703 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Feb 16 11:33:24 crc kubenswrapper[4949]: I0216 11:33:24.491606 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-79b5d74c8c-2qhff"] Feb 16 11:33:24 crc kubenswrapper[4949]: E0216 11:33:24.497878 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16d14031-a4a4-4965-a1fc-b385b05df235" containerName="dnsmasq-dns" Feb 16 11:33:24 crc kubenswrapper[4949]: I0216 11:33:24.497924 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="16d14031-a4a4-4965-a1fc-b385b05df235" containerName="dnsmasq-dns" Feb 16 11:33:24 crc kubenswrapper[4949]: E0216 11:33:24.497977 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a1e0048-b760-4b8c-a65b-7f0224833721" containerName="nova-manage" Feb 16 11:33:24 crc kubenswrapper[4949]: I0216 11:33:24.497984 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a1e0048-b760-4b8c-a65b-7f0224833721" containerName="nova-manage" Feb 16 11:33:24 crc kubenswrapper[4949]: E0216 11:33:24.498027 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0bd872f-2560-48de-997e-a2ce07f4f0b7" containerName="nova-cell1-conductor-db-sync" Feb 16 11:33:24 crc kubenswrapper[4949]: I0216 11:33:24.498033 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0bd872f-2560-48de-997e-a2ce07f4f0b7" containerName="nova-cell1-conductor-db-sync" Feb 16 11:33:24 crc kubenswrapper[4949]: E0216 11:33:24.498062 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16d14031-a4a4-4965-a1fc-b385b05df235" containerName="init" Feb 16 11:33:24 crc kubenswrapper[4949]: I0216 11:33:24.498067 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="16d14031-a4a4-4965-a1fc-b385b05df235" containerName="init" Feb 16 11:33:24 crc kubenswrapper[4949]: I0216 11:33:24.499647 4949 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="6a1e0048-b760-4b8c-a65b-7f0224833721" containerName="nova-manage" Feb 16 11:33:24 crc kubenswrapper[4949]: I0216 11:33:24.499709 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="a0bd872f-2560-48de-997e-a2ce07f4f0b7" containerName="nova-cell1-conductor-db-sync" Feb 16 11:33:24 crc kubenswrapper[4949]: I0216 11:33:24.499753 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="16d14031-a4a4-4965-a1fc-b385b05df235" containerName="dnsmasq-dns" Feb 16 11:33:24 crc kubenswrapper[4949]: I0216 11:33:24.503323 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-79b5d74c8c-2qhff" Feb 16 11:33:24 crc kubenswrapper[4949]: I0216 11:33:24.572630 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-79b5d74c8c-2qhff"] Feb 16 11:33:24 crc kubenswrapper[4949]: I0216 11:33:24.677270 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d0fc7610-06f6-47af-b194-113413f5b260-dns-swift-storage-0\") pod \"dnsmasq-dns-79b5d74c8c-2qhff\" (UID: \"d0fc7610-06f6-47af-b194-113413f5b260\") " pod="openstack/dnsmasq-dns-79b5d74c8c-2qhff" Feb 16 11:33:24 crc kubenswrapper[4949]: I0216 11:33:24.678241 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d0fc7610-06f6-47af-b194-113413f5b260-dns-svc\") pod \"dnsmasq-dns-79b5d74c8c-2qhff\" (UID: \"d0fc7610-06f6-47af-b194-113413f5b260\") " pod="openstack/dnsmasq-dns-79b5d74c8c-2qhff" Feb 16 11:33:24 crc kubenswrapper[4949]: I0216 11:33:24.678765 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d0fc7610-06f6-47af-b194-113413f5b260-config\") pod \"dnsmasq-dns-79b5d74c8c-2qhff\" (UID: \"d0fc7610-06f6-47af-b194-113413f5b260\") " pod="openstack/dnsmasq-dns-79b5d74c8c-2qhff" Feb 16 11:33:24 crc kubenswrapper[4949]: I0216 11:33:24.678899 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d0fc7610-06f6-47af-b194-113413f5b260-ovsdbserver-nb\") pod \"dnsmasq-dns-79b5d74c8c-2qhff\" (UID: \"d0fc7610-06f6-47af-b194-113413f5b260\") " pod="openstack/dnsmasq-dns-79b5d74c8c-2qhff" Feb 16 11:33:24 crc kubenswrapper[4949]: I0216 11:33:24.678989 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m298n\" (UniqueName: \"kubernetes.io/projected/d0fc7610-06f6-47af-b194-113413f5b260-kube-api-access-m298n\") pod \"dnsmasq-dns-79b5d74c8c-2qhff\" (UID: \"d0fc7610-06f6-47af-b194-113413f5b260\") " pod="openstack/dnsmasq-dns-79b5d74c8c-2qhff" Feb 16 11:33:24 crc kubenswrapper[4949]: I0216 11:33:24.679280 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d0fc7610-06f6-47af-b194-113413f5b260-ovsdbserver-sb\") pod \"dnsmasq-dns-79b5d74c8c-2qhff\" (UID: \"d0fc7610-06f6-47af-b194-113413f5b260\") " pod="openstack/dnsmasq-dns-79b5d74c8c-2qhff" Feb 16 11:33:24 crc kubenswrapper[4949]: I0216 11:33:24.781424 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d0fc7610-06f6-47af-b194-113413f5b260-dns-swift-storage-0\") pod 
\"dnsmasq-dns-79b5d74c8c-2qhff\" (UID: \"d0fc7610-06f6-47af-b194-113413f5b260\") " pod="openstack/dnsmasq-dns-79b5d74c8c-2qhff" Feb 16 11:33:24 crc kubenswrapper[4949]: I0216 11:33:24.781487 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d0fc7610-06f6-47af-b194-113413f5b260-dns-svc\") pod \"dnsmasq-dns-79b5d74c8c-2qhff\" (UID: \"d0fc7610-06f6-47af-b194-113413f5b260\") " pod="openstack/dnsmasq-dns-79b5d74c8c-2qhff" Feb 16 11:33:24 crc kubenswrapper[4949]: I0216 11:33:24.781534 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d0fc7610-06f6-47af-b194-113413f5b260-config\") pod \"dnsmasq-dns-79b5d74c8c-2qhff\" (UID: \"d0fc7610-06f6-47af-b194-113413f5b260\") " pod="openstack/dnsmasq-dns-79b5d74c8c-2qhff" Feb 16 11:33:24 crc kubenswrapper[4949]: I0216 11:33:24.781571 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d0fc7610-06f6-47af-b194-113413f5b260-ovsdbserver-nb\") pod \"dnsmasq-dns-79b5d74c8c-2qhff\" (UID: \"d0fc7610-06f6-47af-b194-113413f5b260\") " pod="openstack/dnsmasq-dns-79b5d74c8c-2qhff" Feb 16 11:33:24 crc kubenswrapper[4949]: I0216 11:33:24.781607 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m298n\" (UniqueName: \"kubernetes.io/projected/d0fc7610-06f6-47af-b194-113413f5b260-kube-api-access-m298n\") pod \"dnsmasq-dns-79b5d74c8c-2qhff\" (UID: \"d0fc7610-06f6-47af-b194-113413f5b260\") " pod="openstack/dnsmasq-dns-79b5d74c8c-2qhff" Feb 16 11:33:24 crc kubenswrapper[4949]: I0216 11:33:24.781676 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d0fc7610-06f6-47af-b194-113413f5b260-ovsdbserver-sb\") pod \"dnsmasq-dns-79b5d74c8c-2qhff\" (UID: \"d0fc7610-06f6-47af-b194-113413f5b260\") " pod="openstack/dnsmasq-dns-79b5d74c8c-2qhff" Feb 16 11:33:24 crc kubenswrapper[4949]: I0216 11:33:24.782667 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d0fc7610-06f6-47af-b194-113413f5b260-ovsdbserver-sb\") pod \"dnsmasq-dns-79b5d74c8c-2qhff\" (UID: \"d0fc7610-06f6-47af-b194-113413f5b260\") " pod="openstack/dnsmasq-dns-79b5d74c8c-2qhff" Feb 16 11:33:24 crc kubenswrapper[4949]: I0216 11:33:24.782663 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d0fc7610-06f6-47af-b194-113413f5b260-dns-svc\") pod \"dnsmasq-dns-79b5d74c8c-2qhff\" (UID: \"d0fc7610-06f6-47af-b194-113413f5b260\") " pod="openstack/dnsmasq-dns-79b5d74c8c-2qhff" Feb 16 11:33:24 crc kubenswrapper[4949]: I0216 11:33:24.783410 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d0fc7610-06f6-47af-b194-113413f5b260-config\") pod \"dnsmasq-dns-79b5d74c8c-2qhff\" (UID: \"d0fc7610-06f6-47af-b194-113413f5b260\") " pod="openstack/dnsmasq-dns-79b5d74c8c-2qhff" Feb 16 11:33:24 crc kubenswrapper[4949]: I0216 11:33:24.783459 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d0fc7610-06f6-47af-b194-113413f5b260-dns-swift-storage-0\") pod \"dnsmasq-dns-79b5d74c8c-2qhff\" (UID: \"d0fc7610-06f6-47af-b194-113413f5b260\") " 
pod="openstack/dnsmasq-dns-79b5d74c8c-2qhff" Feb 16 11:33:24 crc kubenswrapper[4949]: I0216 11:33:24.783962 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d0fc7610-06f6-47af-b194-113413f5b260-ovsdbserver-nb\") pod \"dnsmasq-dns-79b5d74c8c-2qhff\" (UID: \"d0fc7610-06f6-47af-b194-113413f5b260\") " pod="openstack/dnsmasq-dns-79b5d74c8c-2qhff" Feb 16 11:33:24 crc kubenswrapper[4949]: I0216 11:33:24.809296 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m298n\" (UniqueName: \"kubernetes.io/projected/d0fc7610-06f6-47af-b194-113413f5b260-kube-api-access-m298n\") pod \"dnsmasq-dns-79b5d74c8c-2qhff\" (UID: \"d0fc7610-06f6-47af-b194-113413f5b260\") " pod="openstack/dnsmasq-dns-79b5d74c8c-2qhff" Feb 16 11:33:24 crc kubenswrapper[4949]: I0216 11:33:24.843934 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Feb 16 11:33:24 crc kubenswrapper[4949]: I0216 11:33:24.846383 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Feb 16 11:33:24 crc kubenswrapper[4949]: I0216 11:33:24.853391 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Feb 16 11:33:24 crc kubenswrapper[4949]: I0216 11:33:24.860640 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Feb 16 11:33:24 crc kubenswrapper[4949]: I0216 11:33:24.860954 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Feb 16 11:33:24 crc kubenswrapper[4949]: I0216 11:33:24.862543 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Feb 16 11:33:24 crc kubenswrapper[4949]: I0216 11:33:24.918031 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-79b5d74c8c-2qhff" Feb 16 11:33:24 crc kubenswrapper[4949]: I0216 11:33:24.990805 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6395848e-1b80-4956-a3a6-80941876e3f9-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"6395848e-1b80-4956-a3a6-80941876e3f9\") " pod="openstack/nova-cell1-conductor-0" Feb 16 11:33:24 crc kubenswrapper[4949]: I0216 11:33:24.990944 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6395848e-1b80-4956-a3a6-80941876e3f9-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"6395848e-1b80-4956-a3a6-80941876e3f9\") " pod="openstack/nova-cell1-conductor-0" Feb 16 11:33:24 crc kubenswrapper[4949]: I0216 11:33:24.991010 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-clpx8\" (UniqueName: \"kubernetes.io/projected/6395848e-1b80-4956-a3a6-80941876e3f9-kube-api-access-clpx8\") pod \"nova-cell1-conductor-0\" (UID: \"6395848e-1b80-4956-a3a6-80941876e3f9\") " pod="openstack/nova-cell1-conductor-0" Feb 16 11:33:25 crc kubenswrapper[4949]: I0216 11:33:25.048579 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Feb 16 11:33:25 crc kubenswrapper[4949]: I0216 11:33:25.103080 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6395848e-1b80-4956-a3a6-80941876e3f9-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"6395848e-1b80-4956-a3a6-80941876e3f9\") " pod="openstack/nova-cell1-conductor-0" Feb 16 11:33:25 crc kubenswrapper[4949]: I0216 11:33:25.103158 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-clpx8\" (UniqueName: \"kubernetes.io/projected/6395848e-1b80-4956-a3a6-80941876e3f9-kube-api-access-clpx8\") pod \"nova-cell1-conductor-0\" (UID: \"6395848e-1b80-4956-a3a6-80941876e3f9\") " pod="openstack/nova-cell1-conductor-0" Feb 16 11:33:25 crc kubenswrapper[4949]: I0216 11:33:25.103314 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6395848e-1b80-4956-a3a6-80941876e3f9-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"6395848e-1b80-4956-a3a6-80941876e3f9\") " pod="openstack/nova-cell1-conductor-0" Feb 16 11:33:25 crc kubenswrapper[4949]: I0216 11:33:25.138013 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6395848e-1b80-4956-a3a6-80941876e3f9-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"6395848e-1b80-4956-a3a6-80941876e3f9\") " pod="openstack/nova-cell1-conductor-0" Feb 16 11:33:25 crc kubenswrapper[4949]: I0216 11:33:25.144884 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6395848e-1b80-4956-a3a6-80941876e3f9-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"6395848e-1b80-4956-a3a6-80941876e3f9\") " pod="openstack/nova-cell1-conductor-0" Feb 16 11:33:25 crc kubenswrapper[4949]: I0216 11:33:25.161947 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-clpx8\" (UniqueName: \"kubernetes.io/projected/6395848e-1b80-4956-a3a6-80941876e3f9-kube-api-access-clpx8\") 
pod \"nova-cell1-conductor-0\" (UID: \"6395848e-1b80-4956-a3a6-80941876e3f9\") " pod="openstack/nova-cell1-conductor-0" Feb 16 11:33:25 crc kubenswrapper[4949]: I0216 11:33:25.194641 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Feb 16 11:33:25 crc kubenswrapper[4949]: I0216 11:33:25.218386 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Feb 16 11:33:25 crc kubenswrapper[4949]: I0216 11:33:25.218710 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="b6f11b9b-6fb0-4b7d-b6bf-15340c23d530" containerName="nova-scheduler-scheduler" containerID="cri-o://934e0ddd69595392eb7a07f84670c395bd782ce3a941b7cbf57d80942351f48c" gracePeriod=30 Feb 16 11:33:25 crc kubenswrapper[4949]: I0216 11:33:25.275355 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-8tvgc" event={"ID":"167f9139-7451-4312-9b89-ebff291c748a","Type":"ContainerStarted","Data":"2c69fed30e76ccad4a5e1a07767e49ecbac0b90b62d8597551ceb5e02798567b"} Feb 16 11:33:25 crc kubenswrapper[4949]: I0216 11:33:25.314577 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Feb 16 11:33:25 crc kubenswrapper[4949]: I0216 11:33:25.384749 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-db-sync-8tvgc" podStartSLOduration=3.940730431 podStartE2EDuration="12.384724335s" podCreationTimestamp="2026-02-16 11:33:13 +0000 UTC" firstStartedPulling="2026-02-16 11:33:15.539985568 +0000 UTC m=+1585.169319733" lastFinishedPulling="2026-02-16 11:33:23.983979472 +0000 UTC m=+1593.613313637" observedRunningTime="2026-02-16 11:33:25.291279942 +0000 UTC m=+1594.920614117" watchObservedRunningTime="2026-02-16 11:33:25.384724335 +0000 UTC m=+1595.014058500" Feb 16 11:33:25 crc kubenswrapper[4949]: I0216 11:33:25.645021 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-79b5d74c8c-2qhff"] Feb 16 11:33:25 crc kubenswrapper[4949]: W0216 11:33:25.691379 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd0fc7610_06f6_47af_b194_113413f5b260.slice/crio-c6cc995d9c48c00761150b83ac1f350cbf78ad70801937d6318ea52b1c0b0fb1 WatchSource:0}: Error finding container c6cc995d9c48c00761150b83ac1f350cbf78ad70801937d6318ea52b1c0b0fb1: Status 404 returned error can't find the container with id c6cc995d9c48c00761150b83ac1f350cbf78ad70801937d6318ea52b1c0b0fb1 Feb 16 11:33:25 crc kubenswrapper[4949]: I0216 11:33:25.904315 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.250:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Feb 16 11:33:25 crc kubenswrapper[4949]: I0216 11:33:25.904469 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.250:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Feb 16 11:33:26 crc kubenswrapper[4949]: I0216 11:33:26.082620 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Feb 16 11:33:26 crc kubenswrapper[4949]: I0216 
11:33:26.292358 4949 generic.go:334] "Generic (PLEG): container finished" podID="d0fc7610-06f6-47af-b194-113413f5b260" containerID="e255728c952daf3776e54e8bb2d723dbdf9bd956a0aa89399b23dc15afbe84f2" exitCode=0 Feb 16 11:33:26 crc kubenswrapper[4949]: I0216 11:33:26.293701 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79b5d74c8c-2qhff" event={"ID":"d0fc7610-06f6-47af-b194-113413f5b260","Type":"ContainerDied","Data":"e255728c952daf3776e54e8bb2d723dbdf9bd956a0aa89399b23dc15afbe84f2"} Feb 16 11:33:26 crc kubenswrapper[4949]: I0216 11:33:26.293739 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79b5d74c8c-2qhff" event={"ID":"d0fc7610-06f6-47af-b194-113413f5b260","Type":"ContainerStarted","Data":"c6cc995d9c48c00761150b83ac1f350cbf78ad70801937d6318ea52b1c0b0fb1"} Feb 16 11:33:26 crc kubenswrapper[4949]: I0216 11:33:26.318873 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"6395848e-1b80-4956-a3a6-80941876e3f9","Type":"ContainerStarted","Data":"9c54c534a76fb1901af72f95974d58614612b0f94de553fce5f66bb3701451da"} Feb 16 11:33:26 crc kubenswrapper[4949]: I0216 11:33:26.319033 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf" containerName="nova-metadata-log" containerID="cri-o://1b263914b8c93744d4917ebfb3919393a870e44cd516d21490c1f66bfbcaf391" gracePeriod=30 Feb 16 11:33:26 crc kubenswrapper[4949]: I0216 11:33:26.319237 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="9e673569-9ea0-48bd-8a23-d3766abebe4d" containerName="nova-api-log" containerID="cri-o://3a8c7afa2b8e3c8d0b605cede3d3c396fcc6dda0e30d00c61ed7a18c4e892f87" gracePeriod=30 Feb 16 11:33:26 crc kubenswrapper[4949]: I0216 11:33:26.319676 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="9e673569-9ea0-48bd-8a23-d3766abebe4d" containerName="nova-api-api" containerID="cri-o://a4486b962deabfc98ab8e4fc94066bc75117529e05df77c95d9cd4b22fbc7396" gracePeriod=30 Feb 16 11:33:26 crc kubenswrapper[4949]: I0216 11:33:26.319767 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf" containerName="nova-metadata-metadata" containerID="cri-o://80d715caa1f09bc70781d98e854c03a3e9608cd2e2d42fc2bf295a80afbddaec" gracePeriod=30 Feb 16 11:33:27 crc kubenswrapper[4949]: I0216 11:33:27.392155 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79b5d74c8c-2qhff" event={"ID":"d0fc7610-06f6-47af-b194-113413f5b260","Type":"ContainerStarted","Data":"4ab2b185c205e787e685b3c44125afa806f32f996c5415f9817dd20fdb69d082"} Feb 16 11:33:27 crc kubenswrapper[4949]: I0216 11:33:27.392769 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-79b5d74c8c-2qhff" Feb 16 11:33:27 crc kubenswrapper[4949]: I0216 11:33:27.395956 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"6395848e-1b80-4956-a3a6-80941876e3f9","Type":"ContainerStarted","Data":"ea7276aa40993973fc0eb7fb88fd65ae990d3d46aca22679b4f4530de1de25fa"} Feb 16 11:33:27 crc kubenswrapper[4949]: I0216 11:33:27.397228 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Feb 16 11:33:27 crc kubenswrapper[4949]: 
I0216 11:33:27.404930 4949 generic.go:334] "Generic (PLEG): container finished" podID="9e673569-9ea0-48bd-8a23-d3766abebe4d" containerID="3a8c7afa2b8e3c8d0b605cede3d3c396fcc6dda0e30d00c61ed7a18c4e892f87" exitCode=143 Feb 16 11:33:27 crc kubenswrapper[4949]: I0216 11:33:27.405052 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"9e673569-9ea0-48bd-8a23-d3766abebe4d","Type":"ContainerDied","Data":"3a8c7afa2b8e3c8d0b605cede3d3c396fcc6dda0e30d00c61ed7a18c4e892f87"} Feb 16 11:33:27 crc kubenswrapper[4949]: I0216 11:33:27.417562 4949 generic.go:334] "Generic (PLEG): container finished" podID="8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf" containerID="1b263914b8c93744d4917ebfb3919393a870e44cd516d21490c1f66bfbcaf391" exitCode=143 Feb 16 11:33:27 crc kubenswrapper[4949]: I0216 11:33:27.418074 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf","Type":"ContainerDied","Data":"1b263914b8c93744d4917ebfb3919393a870e44cd516d21490c1f66bfbcaf391"} Feb 16 11:33:27 crc kubenswrapper[4949]: I0216 11:33:27.425035 4949 generic.go:334] "Generic (PLEG): container finished" podID="b6f11b9b-6fb0-4b7d-b6bf-15340c23d530" containerID="934e0ddd69595392eb7a07f84670c395bd782ce3a941b7cbf57d80942351f48c" exitCode=0 Feb 16 11:33:27 crc kubenswrapper[4949]: I0216 11:33:27.425089 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"b6f11b9b-6fb0-4b7d-b6bf-15340c23d530","Type":"ContainerDied","Data":"934e0ddd69595392eb7a07f84670c395bd782ce3a941b7cbf57d80942351f48c"} Feb 16 11:33:27 crc kubenswrapper[4949]: I0216 11:33:27.435024 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-79b5d74c8c-2qhff" podStartSLOduration=3.434997174 podStartE2EDuration="3.434997174s" podCreationTimestamp="2026-02-16 11:33:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:33:27.425378869 +0000 UTC m=+1597.054713034" watchObservedRunningTime="2026-02-16 11:33:27.434997174 +0000 UTC m=+1597.064331339" Feb 16 11:33:27 crc kubenswrapper[4949]: I0216 11:33:27.653811 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Feb 16 11:33:27 crc kubenswrapper[4949]: I0216 11:33:27.683102 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=3.683049398 podStartE2EDuration="3.683049398s" podCreationTimestamp="2026-02-16 11:33:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:33:27.460947626 +0000 UTC m=+1597.090281791" watchObservedRunningTime="2026-02-16 11:33:27.683049398 +0000 UTC m=+1597.312383563" Feb 16 11:33:27 crc kubenswrapper[4949]: I0216 11:33:27.818671 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6f11b9b-6fb0-4b7d-b6bf-15340c23d530-config-data\") pod \"b6f11b9b-6fb0-4b7d-b6bf-15340c23d530\" (UID: \"b6f11b9b-6fb0-4b7d-b6bf-15340c23d530\") " Feb 16 11:33:27 crc kubenswrapper[4949]: I0216 11:33:27.818757 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6f11b9b-6fb0-4b7d-b6bf-15340c23d530-combined-ca-bundle\") pod \"b6f11b9b-6fb0-4b7d-b6bf-15340c23d530\" (UID: \"b6f11b9b-6fb0-4b7d-b6bf-15340c23d530\") " Feb 16 11:33:27 crc kubenswrapper[4949]: I0216 11:33:27.818834 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kw58l\" (UniqueName: \"kubernetes.io/projected/b6f11b9b-6fb0-4b7d-b6bf-15340c23d530-kube-api-access-kw58l\") pod \"b6f11b9b-6fb0-4b7d-b6bf-15340c23d530\" (UID: \"b6f11b9b-6fb0-4b7d-b6bf-15340c23d530\") " Feb 16 11:33:27 crc kubenswrapper[4949]: I0216 11:33:27.824836 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6f11b9b-6fb0-4b7d-b6bf-15340c23d530-kube-api-access-kw58l" (OuterVolumeSpecName: "kube-api-access-kw58l") pod "b6f11b9b-6fb0-4b7d-b6bf-15340c23d530" (UID: "b6f11b9b-6fb0-4b7d-b6bf-15340c23d530"). InnerVolumeSpecName "kube-api-access-kw58l". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:33:27 crc kubenswrapper[4949]: I0216 11:33:27.903570 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6f11b9b-6fb0-4b7d-b6bf-15340c23d530-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b6f11b9b-6fb0-4b7d-b6bf-15340c23d530" (UID: "b6f11b9b-6fb0-4b7d-b6bf-15340c23d530"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:33:27 crc kubenswrapper[4949]: I0216 11:33:27.928356 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6f11b9b-6fb0-4b7d-b6bf-15340c23d530-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:27 crc kubenswrapper[4949]: I0216 11:33:27.928613 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kw58l\" (UniqueName: \"kubernetes.io/projected/b6f11b9b-6fb0-4b7d-b6bf-15340c23d530-kube-api-access-kw58l\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:27 crc kubenswrapper[4949]: I0216 11:33:27.935154 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6f11b9b-6fb0-4b7d-b6bf-15340c23d530-config-data" (OuterVolumeSpecName: "config-data") pod "b6f11b9b-6fb0-4b7d-b6bf-15340c23d530" (UID: "b6f11b9b-6fb0-4b7d-b6bf-15340c23d530"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:33:28 crc kubenswrapper[4949]: I0216 11:33:28.031094 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6f11b9b-6fb0-4b7d-b6bf-15340c23d530-config-data\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:28 crc kubenswrapper[4949]: I0216 11:33:28.446703 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"b6f11b9b-6fb0-4b7d-b6bf-15340c23d530","Type":"ContainerDied","Data":"6c9be516b026e411e95e3bb728be946087fb9097a659a15644e4637157a34dec"} Feb 16 11:33:28 crc kubenswrapper[4949]: I0216 11:33:28.446760 4949 scope.go:117] "RemoveContainer" containerID="934e0ddd69595392eb7a07f84670c395bd782ce3a941b7cbf57d80942351f48c" Feb 16 11:33:28 crc kubenswrapper[4949]: I0216 11:33:28.446894 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Feb 16 11:33:28 crc kubenswrapper[4949]: I0216 11:33:28.508992 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Feb 16 11:33:28 crc kubenswrapper[4949]: I0216 11:33:28.526479 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Feb 16 11:33:28 crc kubenswrapper[4949]: I0216 11:33:28.550724 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Feb 16 11:33:28 crc kubenswrapper[4949]: E0216 11:33:28.551351 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6f11b9b-6fb0-4b7d-b6bf-15340c23d530" containerName="nova-scheduler-scheduler" Feb 16 11:33:28 crc kubenswrapper[4949]: I0216 11:33:28.551373 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6f11b9b-6fb0-4b7d-b6bf-15340c23d530" containerName="nova-scheduler-scheduler" Feb 16 11:33:28 crc kubenswrapper[4949]: I0216 11:33:28.551648 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6f11b9b-6fb0-4b7d-b6bf-15340c23d530" containerName="nova-scheduler-scheduler" Feb 16 11:33:28 crc kubenswrapper[4949]: I0216 11:33:28.552716 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Feb 16 11:33:28 crc kubenswrapper[4949]: I0216 11:33:28.564973 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Feb 16 11:33:28 crc kubenswrapper[4949]: I0216 11:33:28.567386 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Feb 16 11:33:28 crc kubenswrapper[4949]: I0216 11:33:28.647928 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1-config-data\") pod \"nova-scheduler-0\" (UID: \"f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1\") " pod="openstack/nova-scheduler-0" Feb 16 11:33:28 crc kubenswrapper[4949]: I0216 11:33:28.648511 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tftfv\" (UniqueName: \"kubernetes.io/projected/f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1-kube-api-access-tftfv\") pod \"nova-scheduler-0\" (UID: \"f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1\") " pod="openstack/nova-scheduler-0" Feb 16 11:33:28 crc kubenswrapper[4949]: I0216 11:33:28.648672 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1\") " pod="openstack/nova-scheduler-0" Feb 16 11:33:28 crc kubenswrapper[4949]: I0216 11:33:28.751590 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1-config-data\") pod \"nova-scheduler-0\" (UID: \"f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1\") " pod="openstack/nova-scheduler-0" Feb 16 11:33:28 crc kubenswrapper[4949]: I0216 11:33:28.751794 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tftfv\" (UniqueName: \"kubernetes.io/projected/f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1-kube-api-access-tftfv\") pod \"nova-scheduler-0\" (UID: \"f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1\") " pod="openstack/nova-scheduler-0" Feb 16 11:33:28 crc kubenswrapper[4949]: I0216 11:33:28.751876 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1\") " pod="openstack/nova-scheduler-0" Feb 16 11:33:28 crc kubenswrapper[4949]: I0216 11:33:28.757956 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1-config-data\") pod \"nova-scheduler-0\" (UID: \"f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1\") " pod="openstack/nova-scheduler-0" Feb 16 11:33:28 crc kubenswrapper[4949]: I0216 11:33:28.758893 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1\") " pod="openstack/nova-scheduler-0" Feb 16 11:33:28 crc kubenswrapper[4949]: I0216 11:33:28.776913 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tftfv\" (UniqueName: 
\"kubernetes.io/projected/f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1-kube-api-access-tftfv\") pod \"nova-scheduler-0\" (UID: \"f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1\") " pod="openstack/nova-scheduler-0" Feb 16 11:33:28 crc kubenswrapper[4949]: I0216 11:33:28.900074 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Feb 16 11:33:29 crc kubenswrapper[4949]: I0216 11:33:29.256917 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6f11b9b-6fb0-4b7d-b6bf-15340c23d530" path="/var/lib/kubelet/pods/b6f11b9b-6fb0-4b7d-b6bf-15340c23d530/volumes" Feb 16 11:33:29 crc kubenswrapper[4949]: I0216 11:33:29.470735 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.014376 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.015493 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c46769b4-dd47-47eb-ae71-f1cf844b93ba" containerName="ceilometer-central-agent" containerID="cri-o://741ff1e69c385f24184ef7c0fbef1432fecdac23f2944962b210aa063c401a5d" gracePeriod=30 Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.017743 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c46769b4-dd47-47eb-ae71-f1cf844b93ba" containerName="proxy-httpd" containerID="cri-o://11026931ad6332ebe225a5e7f86e5df0ffa9c24b6565abb0b590e31028e54547" gracePeriod=30 Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.017993 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c46769b4-dd47-47eb-ae71-f1cf844b93ba" containerName="ceilometer-notification-agent" containerID="cri-o://3c8af2a289171b3fda9efa6c088788edc11ee2bea583ea311b685232d173cda1" gracePeriod=30 Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.018067 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c46769b4-dd47-47eb-ae71-f1cf844b93ba" containerName="sg-core" containerID="cri-o://6995c4611db96d4813f2fc4c4286930c303c3682bb7ee129cef0a6adc7fa4b10" gracePeriod=30 Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.031745 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="c46769b4-dd47-47eb-ae71-f1cf844b93ba" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.248:3000/\": EOF" Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.347302 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.494680 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1","Type":"ContainerStarted","Data":"dc8b443557e7ef591dc5c9e3d9dd638603d3c9d9ac8122b1c87600a739d7f0f5"} Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.494725 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1","Type":"ContainerStarted","Data":"0643148a9bf6032e6e5cbf28b40b7910eb2945ee96944cfa56446487c7afc011"} Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.498523 4949 generic.go:334] "Generic (PLEG): container finished" podID="9e673569-9ea0-48bd-8a23-d3766abebe4d" containerID="a4486b962deabfc98ab8e4fc94066bc75117529e05df77c95d9cd4b22fbc7396" exitCode=0 Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.498580 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.498605 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"9e673569-9ea0-48bd-8a23-d3766abebe4d","Type":"ContainerDied","Data":"a4486b962deabfc98ab8e4fc94066bc75117529e05df77c95d9cd4b22fbc7396"} Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.498661 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"9e673569-9ea0-48bd-8a23-d3766abebe4d","Type":"ContainerDied","Data":"5080d2f94ddde178c55a62b04cfc8eca0a0d25d5af14c34a3d1a4990fe9847a0"} Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.498679 4949 scope.go:117] "RemoveContainer" containerID="a4486b962deabfc98ab8e4fc94066bc75117529e05df77c95d9cd4b22fbc7396" Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.502296 4949 generic.go:334] "Generic (PLEG): container finished" podID="167f9139-7451-4312-9b89-ebff291c748a" containerID="2c69fed30e76ccad4a5e1a07767e49ecbac0b90b62d8597551ceb5e02798567b" exitCode=0 Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.502362 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-8tvgc" event={"ID":"167f9139-7451-4312-9b89-ebff291c748a","Type":"ContainerDied","Data":"2c69fed30e76ccad4a5e1a07767e49ecbac0b90b62d8597551ceb5e02798567b"} Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.506579 4949 generic.go:334] "Generic (PLEG): container finished" podID="c46769b4-dd47-47eb-ae71-f1cf844b93ba" containerID="11026931ad6332ebe225a5e7f86e5df0ffa9c24b6565abb0b590e31028e54547" exitCode=0 Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.506603 4949 generic.go:334] "Generic (PLEG): container finished" podID="c46769b4-dd47-47eb-ae71-f1cf844b93ba" containerID="6995c4611db96d4813f2fc4c4286930c303c3682bb7ee129cef0a6adc7fa4b10" exitCode=2 Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.506627 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c46769b4-dd47-47eb-ae71-f1cf844b93ba","Type":"ContainerDied","Data":"11026931ad6332ebe225a5e7f86e5df0ffa9c24b6565abb0b590e31028e54547"} Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.506648 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c46769b4-dd47-47eb-ae71-f1cf844b93ba","Type":"ContainerDied","Data":"6995c4611db96d4813f2fc4c4286930c303c3682bb7ee129cef0a6adc7fa4b10"} Feb 16 11:33:30 crc 
kubenswrapper[4949]: I0216 11:33:30.522312 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9e673569-9ea0-48bd-8a23-d3766abebe4d-logs\") pod \"9e673569-9ea0-48bd-8a23-d3766abebe4d\" (UID: \"9e673569-9ea0-48bd-8a23-d3766abebe4d\") " Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.522524 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9e673569-9ea0-48bd-8a23-d3766abebe4d-config-data\") pod \"9e673569-9ea0-48bd-8a23-d3766abebe4d\" (UID: \"9e673569-9ea0-48bd-8a23-d3766abebe4d\") " Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.522652 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e673569-9ea0-48bd-8a23-d3766abebe4d-combined-ca-bundle\") pod \"9e673569-9ea0-48bd-8a23-d3766abebe4d\" (UID: \"9e673569-9ea0-48bd-8a23-d3766abebe4d\") " Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.522693 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tfdtd\" (UniqueName: \"kubernetes.io/projected/9e673569-9ea0-48bd-8a23-d3766abebe4d-kube-api-access-tfdtd\") pod \"9e673569-9ea0-48bd-8a23-d3766abebe4d\" (UID: \"9e673569-9ea0-48bd-8a23-d3766abebe4d\") " Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.522941 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9e673569-9ea0-48bd-8a23-d3766abebe4d-logs" (OuterVolumeSpecName: "logs") pod "9e673569-9ea0-48bd-8a23-d3766abebe4d" (UID: "9e673569-9ea0-48bd-8a23-d3766abebe4d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.523609 4949 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9e673569-9ea0-48bd-8a23-d3766abebe4d-logs\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.533500 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9e673569-9ea0-48bd-8a23-d3766abebe4d-kube-api-access-tfdtd" (OuterVolumeSpecName: "kube-api-access-tfdtd") pod "9e673569-9ea0-48bd-8a23-d3766abebe4d" (UID: "9e673569-9ea0-48bd-8a23-d3766abebe4d"). InnerVolumeSpecName "kube-api-access-tfdtd". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.545219 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.545192417 podStartE2EDuration="2.545192417s" podCreationTimestamp="2026-02-16 11:33:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:33:30.522281162 +0000 UTC m=+1600.151615337" watchObservedRunningTime="2026-02-16 11:33:30.545192417 +0000 UTC m=+1600.174526572" Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.583880 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9e673569-9ea0-48bd-8a23-d3766abebe4d-config-data" (OuterVolumeSpecName: "config-data") pod "9e673569-9ea0-48bd-8a23-d3766abebe4d" (UID: "9e673569-9ea0-48bd-8a23-d3766abebe4d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.586867 4949 scope.go:117] "RemoveContainer" containerID="3a8c7afa2b8e3c8d0b605cede3d3c396fcc6dda0e30d00c61ed7a18c4e892f87" Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.590284 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9e673569-9ea0-48bd-8a23-d3766abebe4d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9e673569-9ea0-48bd-8a23-d3766abebe4d" (UID: "9e673569-9ea0-48bd-8a23-d3766abebe4d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.614962 4949 scope.go:117] "RemoveContainer" containerID="a4486b962deabfc98ab8e4fc94066bc75117529e05df77c95d9cd4b22fbc7396" Feb 16 11:33:30 crc kubenswrapper[4949]: E0216 11:33:30.615550 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a4486b962deabfc98ab8e4fc94066bc75117529e05df77c95d9cd4b22fbc7396\": container with ID starting with a4486b962deabfc98ab8e4fc94066bc75117529e05df77c95d9cd4b22fbc7396 not found: ID does not exist" containerID="a4486b962deabfc98ab8e4fc94066bc75117529e05df77c95d9cd4b22fbc7396" Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.615602 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a4486b962deabfc98ab8e4fc94066bc75117529e05df77c95d9cd4b22fbc7396"} err="failed to get container status \"a4486b962deabfc98ab8e4fc94066bc75117529e05df77c95d9cd4b22fbc7396\": rpc error: code = NotFound desc = could not find container \"a4486b962deabfc98ab8e4fc94066bc75117529e05df77c95d9cd4b22fbc7396\": container with ID starting with a4486b962deabfc98ab8e4fc94066bc75117529e05df77c95d9cd4b22fbc7396 not found: ID does not exist" Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.615633 4949 scope.go:117] "RemoveContainer" containerID="3a8c7afa2b8e3c8d0b605cede3d3c396fcc6dda0e30d00c61ed7a18c4e892f87" Feb 16 11:33:30 crc kubenswrapper[4949]: E0216 11:33:30.616422 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3a8c7afa2b8e3c8d0b605cede3d3c396fcc6dda0e30d00c61ed7a18c4e892f87\": container with ID starting with 3a8c7afa2b8e3c8d0b605cede3d3c396fcc6dda0e30d00c61ed7a18c4e892f87 not found: ID does not exist" containerID="3a8c7afa2b8e3c8d0b605cede3d3c396fcc6dda0e30d00c61ed7a18c4e892f87" Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.616460 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3a8c7afa2b8e3c8d0b605cede3d3c396fcc6dda0e30d00c61ed7a18c4e892f87"} err="failed to get container status \"3a8c7afa2b8e3c8d0b605cede3d3c396fcc6dda0e30d00c61ed7a18c4e892f87\": rpc error: code = NotFound desc = could not find container \"3a8c7afa2b8e3c8d0b605cede3d3c396fcc6dda0e30d00c61ed7a18c4e892f87\": container with ID starting with 3a8c7afa2b8e3c8d0b605cede3d3c396fcc6dda0e30d00c61ed7a18c4e892f87 not found: ID does not exist" Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.626432 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e673569-9ea0-48bd-8a23-d3766abebe4d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.626461 4949 reconciler_common.go:293] "Volume detached for volume 
\"kube-api-access-tfdtd\" (UniqueName: \"kubernetes.io/projected/9e673569-9ea0-48bd-8a23-d3766abebe4d-kube-api-access-tfdtd\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.626472 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9e673569-9ea0-48bd-8a23-d3766abebe4d-config-data\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.871439 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.887782 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.901823 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Feb 16 11:33:30 crc kubenswrapper[4949]: E0216 11:33:30.902714 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e673569-9ea0-48bd-8a23-d3766abebe4d" containerName="nova-api-log" Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.902765 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e673569-9ea0-48bd-8a23-d3766abebe4d" containerName="nova-api-log" Feb 16 11:33:30 crc kubenswrapper[4949]: E0216 11:33:30.902800 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e673569-9ea0-48bd-8a23-d3766abebe4d" containerName="nova-api-api" Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.902816 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e673569-9ea0-48bd-8a23-d3766abebe4d" containerName="nova-api-api" Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.903276 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="9e673569-9ea0-48bd-8a23-d3766abebe4d" containerName="nova-api-api" Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.903324 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="9e673569-9ea0-48bd-8a23-d3766abebe4d" containerName="nova-api-log" Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.906553 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.908687 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.912026 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.919574 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.928675 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.940564 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kznpg\" (UniqueName: \"kubernetes.io/projected/9b6adc35-266a-478c-ae17-63a88705e8a9-kube-api-access-kznpg\") pod \"nova-api-0\" (UID: \"9b6adc35-266a-478c-ae17-63a88705e8a9\") " pod="openstack/nova-api-0" Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.940628 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9b6adc35-266a-478c-ae17-63a88705e8a9-public-tls-certs\") pod \"nova-api-0\" (UID: \"9b6adc35-266a-478c-ae17-63a88705e8a9\") " pod="openstack/nova-api-0" Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.940653 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b6adc35-266a-478c-ae17-63a88705e8a9-config-data\") pod \"nova-api-0\" (UID: \"9b6adc35-266a-478c-ae17-63a88705e8a9\") " pod="openstack/nova-api-0" Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.940715 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9b6adc35-266a-478c-ae17-63a88705e8a9-logs\") pod \"nova-api-0\" (UID: \"9b6adc35-266a-478c-ae17-63a88705e8a9\") " pod="openstack/nova-api-0" Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.940762 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b6adc35-266a-478c-ae17-63a88705e8a9-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"9b6adc35-266a-478c-ae17-63a88705e8a9\") " pod="openstack/nova-api-0" Feb 16 11:33:30 crc kubenswrapper[4949]: I0216 11:33:30.940792 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9b6adc35-266a-478c-ae17-63a88705e8a9-internal-tls-certs\") pod \"nova-api-0\" (UID: \"9b6adc35-266a-478c-ae17-63a88705e8a9\") " pod="openstack/nova-api-0" Feb 16 11:33:31 crc kubenswrapper[4949]: I0216 11:33:31.043518 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kznpg\" (UniqueName: \"kubernetes.io/projected/9b6adc35-266a-478c-ae17-63a88705e8a9-kube-api-access-kznpg\") pod \"nova-api-0\" (UID: \"9b6adc35-266a-478c-ae17-63a88705e8a9\") " pod="openstack/nova-api-0" Feb 16 11:33:31 crc kubenswrapper[4949]: I0216 11:33:31.043589 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9b6adc35-266a-478c-ae17-63a88705e8a9-public-tls-certs\") pod 
\"nova-api-0\" (UID: \"9b6adc35-266a-478c-ae17-63a88705e8a9\") " pod="openstack/nova-api-0" Feb 16 11:33:31 crc kubenswrapper[4949]: I0216 11:33:31.043623 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b6adc35-266a-478c-ae17-63a88705e8a9-config-data\") pod \"nova-api-0\" (UID: \"9b6adc35-266a-478c-ae17-63a88705e8a9\") " pod="openstack/nova-api-0" Feb 16 11:33:31 crc kubenswrapper[4949]: I0216 11:33:31.043705 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9b6adc35-266a-478c-ae17-63a88705e8a9-logs\") pod \"nova-api-0\" (UID: \"9b6adc35-266a-478c-ae17-63a88705e8a9\") " pod="openstack/nova-api-0" Feb 16 11:33:31 crc kubenswrapper[4949]: I0216 11:33:31.043764 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b6adc35-266a-478c-ae17-63a88705e8a9-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"9b6adc35-266a-478c-ae17-63a88705e8a9\") " pod="openstack/nova-api-0" Feb 16 11:33:31 crc kubenswrapper[4949]: I0216 11:33:31.043802 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9b6adc35-266a-478c-ae17-63a88705e8a9-internal-tls-certs\") pod \"nova-api-0\" (UID: \"9b6adc35-266a-478c-ae17-63a88705e8a9\") " pod="openstack/nova-api-0" Feb 16 11:33:31 crc kubenswrapper[4949]: I0216 11:33:31.044330 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9b6adc35-266a-478c-ae17-63a88705e8a9-logs\") pod \"nova-api-0\" (UID: \"9b6adc35-266a-478c-ae17-63a88705e8a9\") " pod="openstack/nova-api-0" Feb 16 11:33:31 crc kubenswrapper[4949]: I0216 11:33:31.048291 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b6adc35-266a-478c-ae17-63a88705e8a9-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"9b6adc35-266a-478c-ae17-63a88705e8a9\") " pod="openstack/nova-api-0" Feb 16 11:33:31 crc kubenswrapper[4949]: I0216 11:33:31.049498 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9b6adc35-266a-478c-ae17-63a88705e8a9-internal-tls-certs\") pod \"nova-api-0\" (UID: \"9b6adc35-266a-478c-ae17-63a88705e8a9\") " pod="openstack/nova-api-0" Feb 16 11:33:31 crc kubenswrapper[4949]: I0216 11:33:31.050016 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9b6adc35-266a-478c-ae17-63a88705e8a9-public-tls-certs\") pod \"nova-api-0\" (UID: \"9b6adc35-266a-478c-ae17-63a88705e8a9\") " pod="openstack/nova-api-0" Feb 16 11:33:31 crc kubenswrapper[4949]: I0216 11:33:31.052284 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b6adc35-266a-478c-ae17-63a88705e8a9-config-data\") pod \"nova-api-0\" (UID: \"9b6adc35-266a-478c-ae17-63a88705e8a9\") " pod="openstack/nova-api-0" Feb 16 11:33:31 crc kubenswrapper[4949]: I0216 11:33:31.065013 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kznpg\" (UniqueName: \"kubernetes.io/projected/9b6adc35-266a-478c-ae17-63a88705e8a9-kube-api-access-kznpg\") pod \"nova-api-0\" (UID: \"9b6adc35-266a-478c-ae17-63a88705e8a9\") " pod="openstack/nova-api-0" Feb 
16 11:33:31 crc kubenswrapper[4949]: I0216 11:33:31.232524 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Feb 16 11:33:31 crc kubenswrapper[4949]: I0216 11:33:31.305480 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9e673569-9ea0-48bd-8a23-d3766abebe4d" path="/var/lib/kubelet/pods/9e673569-9ea0-48bd-8a23-d3766abebe4d/volumes" Feb 16 11:33:31 crc kubenswrapper[4949]: E0216 11:33:31.402267 4949 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc46769b4_dd47_47eb_ae71_f1cf844b93ba.slice/crio-3c8af2a289171b3fda9efa6c088788edc11ee2bea583ea311b685232d173cda1.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod91104758_7e02_4761_bc39_fbca029cda0f.slice\": RecentStats: unable to find data in memory cache]" Feb 16 11:33:31 crc kubenswrapper[4949]: I0216 11:33:31.594720 4949 generic.go:334] "Generic (PLEG): container finished" podID="c46769b4-dd47-47eb-ae71-f1cf844b93ba" containerID="3c8af2a289171b3fda9efa6c088788edc11ee2bea583ea311b685232d173cda1" exitCode=0 Feb 16 11:33:31 crc kubenswrapper[4949]: I0216 11:33:31.594770 4949 generic.go:334] "Generic (PLEG): container finished" podID="c46769b4-dd47-47eb-ae71-f1cf844b93ba" containerID="741ff1e69c385f24184ef7c0fbef1432fecdac23f2944962b210aa063c401a5d" exitCode=0 Feb 16 11:33:31 crc kubenswrapper[4949]: I0216 11:33:31.595022 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c46769b4-dd47-47eb-ae71-f1cf844b93ba","Type":"ContainerDied","Data":"3c8af2a289171b3fda9efa6c088788edc11ee2bea583ea311b685232d173cda1"} Feb 16 11:33:31 crc kubenswrapper[4949]: I0216 11:33:31.595066 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c46769b4-dd47-47eb-ae71-f1cf844b93ba","Type":"ContainerDied","Data":"741ff1e69c385f24184ef7c0fbef1432fecdac23f2944962b210aa063c401a5d"} Feb 16 11:33:31 crc kubenswrapper[4949]: I0216 11:33:31.774416 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 16 11:33:31 crc kubenswrapper[4949]: I0216 11:33:31.884453 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c46769b4-dd47-47eb-ae71-f1cf844b93ba-scripts\") pod \"c46769b4-dd47-47eb-ae71-f1cf844b93ba\" (UID: \"c46769b4-dd47-47eb-ae71-f1cf844b93ba\") " Feb 16 11:33:31 crc kubenswrapper[4949]: I0216 11:33:31.884556 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c46769b4-dd47-47eb-ae71-f1cf844b93ba-run-httpd\") pod \"c46769b4-dd47-47eb-ae71-f1cf844b93ba\" (UID: \"c46769b4-dd47-47eb-ae71-f1cf844b93ba\") " Feb 16 11:33:31 crc kubenswrapper[4949]: I0216 11:33:31.884746 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c46769b4-dd47-47eb-ae71-f1cf844b93ba-combined-ca-bundle\") pod \"c46769b4-dd47-47eb-ae71-f1cf844b93ba\" (UID: \"c46769b4-dd47-47eb-ae71-f1cf844b93ba\") " Feb 16 11:33:31 crc kubenswrapper[4949]: I0216 11:33:31.884808 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7vg5x\" (UniqueName: \"kubernetes.io/projected/c46769b4-dd47-47eb-ae71-f1cf844b93ba-kube-api-access-7vg5x\") pod \"c46769b4-dd47-47eb-ae71-f1cf844b93ba\" (UID: \"c46769b4-dd47-47eb-ae71-f1cf844b93ba\") " Feb 16 11:33:31 crc kubenswrapper[4949]: I0216 11:33:31.884940 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c46769b4-dd47-47eb-ae71-f1cf844b93ba-log-httpd\") pod \"c46769b4-dd47-47eb-ae71-f1cf844b93ba\" (UID: \"c46769b4-dd47-47eb-ae71-f1cf844b93ba\") " Feb 16 11:33:31 crc kubenswrapper[4949]: I0216 11:33:31.884963 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c46769b4-dd47-47eb-ae71-f1cf844b93ba-config-data\") pod \"c46769b4-dd47-47eb-ae71-f1cf844b93ba\" (UID: \"c46769b4-dd47-47eb-ae71-f1cf844b93ba\") " Feb 16 11:33:31 crc kubenswrapper[4949]: I0216 11:33:31.885032 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c46769b4-dd47-47eb-ae71-f1cf844b93ba-sg-core-conf-yaml\") pod \"c46769b4-dd47-47eb-ae71-f1cf844b93ba\" (UID: \"c46769b4-dd47-47eb-ae71-f1cf844b93ba\") " Feb 16 11:33:31 crc kubenswrapper[4949]: I0216 11:33:31.886090 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c46769b4-dd47-47eb-ae71-f1cf844b93ba-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "c46769b4-dd47-47eb-ae71-f1cf844b93ba" (UID: "c46769b4-dd47-47eb-ae71-f1cf844b93ba"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:33:31 crc kubenswrapper[4949]: I0216 11:33:31.888085 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c46769b4-dd47-47eb-ae71-f1cf844b93ba-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "c46769b4-dd47-47eb-ae71-f1cf844b93ba" (UID: "c46769b4-dd47-47eb-ae71-f1cf844b93ba"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:33:31 crc kubenswrapper[4949]: I0216 11:33:31.893555 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c46769b4-dd47-47eb-ae71-f1cf844b93ba-kube-api-access-7vg5x" (OuterVolumeSpecName: "kube-api-access-7vg5x") pod "c46769b4-dd47-47eb-ae71-f1cf844b93ba" (UID: "c46769b4-dd47-47eb-ae71-f1cf844b93ba"). InnerVolumeSpecName "kube-api-access-7vg5x". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:33:31 crc kubenswrapper[4949]: I0216 11:33:31.894069 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c46769b4-dd47-47eb-ae71-f1cf844b93ba-scripts" (OuterVolumeSpecName: "scripts") pod "c46769b4-dd47-47eb-ae71-f1cf844b93ba" (UID: "c46769b4-dd47-47eb-ae71-f1cf844b93ba"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:33:31 crc kubenswrapper[4949]: I0216 11:33:31.952652 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c46769b4-dd47-47eb-ae71-f1cf844b93ba-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "c46769b4-dd47-47eb-ae71-f1cf844b93ba" (UID: "c46769b4-dd47-47eb-ae71-f1cf844b93ba"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.007569 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7vg5x\" (UniqueName: \"kubernetes.io/projected/c46769b4-dd47-47eb-ae71-f1cf844b93ba-kube-api-access-7vg5x\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.007606 4949 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c46769b4-dd47-47eb-ae71-f1cf844b93ba-log-httpd\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.007619 4949 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c46769b4-dd47-47eb-ae71-f1cf844b93ba-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.007644 4949 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c46769b4-dd47-47eb-ae71-f1cf844b93ba-scripts\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.007660 4949 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c46769b4-dd47-47eb-ae71-f1cf844b93ba-run-httpd\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.081438 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c46769b4-dd47-47eb-ae71-f1cf844b93ba-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c46769b4-dd47-47eb-ae71-f1cf844b93ba" (UID: "c46769b4-dd47-47eb-ae71-f1cf844b93ba"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.110088 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c46769b4-dd47-47eb-ae71-f1cf844b93ba-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.216865 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-8tvgc" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.221916 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c46769b4-dd47-47eb-ae71-f1cf844b93ba-config-data" (OuterVolumeSpecName: "config-data") pod "c46769b4-dd47-47eb-ae71-f1cf844b93ba" (UID: "c46769b4-dd47-47eb-ae71-f1cf844b93ba"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.222578 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c46769b4-dd47-47eb-ae71-f1cf844b93ba-config-data\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:32 crc kubenswrapper[4949]: W0216 11:33:32.273448 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9b6adc35_266a_478c_ae17_63a88705e8a9.slice/crio-f0672e269bc6e22d366d35710c5099e97ee2670d2d9c23e5de294e3aa57570cd WatchSource:0}: Error finding container f0672e269bc6e22d366d35710c5099e97ee2670d2d9c23e5de294e3aa57570cd: Status 404 returned error can't find the container with id f0672e269bc6e22d366d35710c5099e97ee2670d2d9c23e5de294e3aa57570cd Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.285841 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.324532 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/167f9139-7451-4312-9b89-ebff291c748a-config-data\") pod \"167f9139-7451-4312-9b89-ebff291c748a\" (UID: \"167f9139-7451-4312-9b89-ebff291c748a\") " Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.325195 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/167f9139-7451-4312-9b89-ebff291c748a-combined-ca-bundle\") pod \"167f9139-7451-4312-9b89-ebff291c748a\" (UID: \"167f9139-7451-4312-9b89-ebff291c748a\") " Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.325579 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-662np\" (UniqueName: \"kubernetes.io/projected/167f9139-7451-4312-9b89-ebff291c748a-kube-api-access-662np\") pod \"167f9139-7451-4312-9b89-ebff291c748a\" (UID: \"167f9139-7451-4312-9b89-ebff291c748a\") " Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.325665 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/167f9139-7451-4312-9b89-ebff291c748a-scripts\") pod \"167f9139-7451-4312-9b89-ebff291c748a\" (UID: \"167f9139-7451-4312-9b89-ebff291c748a\") " Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.333481 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/167f9139-7451-4312-9b89-ebff291c748a-scripts" (OuterVolumeSpecName: "scripts") pod "167f9139-7451-4312-9b89-ebff291c748a" (UID: "167f9139-7451-4312-9b89-ebff291c748a"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.333507 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/167f9139-7451-4312-9b89-ebff291c748a-kube-api-access-662np" (OuterVolumeSpecName: "kube-api-access-662np") pod "167f9139-7451-4312-9b89-ebff291c748a" (UID: "167f9139-7451-4312-9b89-ebff291c748a"). InnerVolumeSpecName "kube-api-access-662np". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.363969 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/167f9139-7451-4312-9b89-ebff291c748a-config-data" (OuterVolumeSpecName: "config-data") pod "167f9139-7451-4312-9b89-ebff291c748a" (UID: "167f9139-7451-4312-9b89-ebff291c748a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.371315 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/167f9139-7451-4312-9b89-ebff291c748a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "167f9139-7451-4312-9b89-ebff291c748a" (UID: "167f9139-7451-4312-9b89-ebff291c748a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.431095 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/167f9139-7451-4312-9b89-ebff291c748a-config-data\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.431144 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/167f9139-7451-4312-9b89-ebff291c748a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.431159 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-662np\" (UniqueName: \"kubernetes.io/projected/167f9139-7451-4312-9b89-ebff291c748a-kube-api-access-662np\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.431191 4949 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/167f9139-7451-4312-9b89-ebff291c748a-scripts\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.608965 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-8tvgc" event={"ID":"167f9139-7451-4312-9b89-ebff291c748a","Type":"ContainerDied","Data":"1f243778a8fb2bbe950245bf45baa206096acd1ec523ab86c332a612b4b9502f"} Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.609013 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1f243778a8fb2bbe950245bf45baa206096acd1ec523ab86c332a612b4b9502f" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.609084 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-8tvgc" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.616025 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c46769b4-dd47-47eb-ae71-f1cf844b93ba","Type":"ContainerDied","Data":"2e3ef0fabd38027d63a8034b703011ce906b563f0c4d3402a8d292a2585b057f"} Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.616085 4949 scope.go:117] "RemoveContainer" containerID="11026931ad6332ebe225a5e7f86e5df0ffa9c24b6565abb0b590e31028e54547" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.616342 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.625478 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"9b6adc35-266a-478c-ae17-63a88705e8a9","Type":"ContainerStarted","Data":"6a08d00f90e435bb819d281e25e763b6a90ec608dc5cf88bf667a76972aa6f72"} Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.625534 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"9b6adc35-266a-478c-ae17-63a88705e8a9","Type":"ContainerStarted","Data":"f0672e269bc6e22d366d35710c5099e97ee2670d2d9c23e5de294e3aa57570cd"} Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.664336 4949 scope.go:117] "RemoveContainer" containerID="6995c4611db96d4813f2fc4c4286930c303c3682bb7ee129cef0a6adc7fa4b10" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.688821 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.709579 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.722202 4949 scope.go:117] "RemoveContainer" containerID="3c8af2a289171b3fda9efa6c088788edc11ee2bea583ea311b685232d173cda1" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.735930 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Feb 16 11:33:32 crc kubenswrapper[4949]: E0216 11:33:32.736571 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c46769b4-dd47-47eb-ae71-f1cf844b93ba" containerName="proxy-httpd" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.736591 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="c46769b4-dd47-47eb-ae71-f1cf844b93ba" containerName="proxy-httpd" Feb 16 11:33:32 crc kubenswrapper[4949]: E0216 11:33:32.736608 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c46769b4-dd47-47eb-ae71-f1cf844b93ba" containerName="ceilometer-central-agent" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.736615 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="c46769b4-dd47-47eb-ae71-f1cf844b93ba" containerName="ceilometer-central-agent" Feb 16 11:33:32 crc kubenswrapper[4949]: E0216 11:33:32.736636 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c46769b4-dd47-47eb-ae71-f1cf844b93ba" containerName="sg-core" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.736642 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="c46769b4-dd47-47eb-ae71-f1cf844b93ba" containerName="sg-core" Feb 16 11:33:32 crc kubenswrapper[4949]: E0216 11:33:32.736680 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c46769b4-dd47-47eb-ae71-f1cf844b93ba" containerName="ceilometer-notification-agent" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 
11:33:32.736686 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="c46769b4-dd47-47eb-ae71-f1cf844b93ba" containerName="ceilometer-notification-agent" Feb 16 11:33:32 crc kubenswrapper[4949]: E0216 11:33:32.736701 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="167f9139-7451-4312-9b89-ebff291c748a" containerName="aodh-db-sync" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.736707 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="167f9139-7451-4312-9b89-ebff291c748a" containerName="aodh-db-sync" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.736924 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="c46769b4-dd47-47eb-ae71-f1cf844b93ba" containerName="proxy-httpd" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.736939 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="167f9139-7451-4312-9b89-ebff291c748a" containerName="aodh-db-sync" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.736959 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="c46769b4-dd47-47eb-ae71-f1cf844b93ba" containerName="sg-core" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.736969 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="c46769b4-dd47-47eb-ae71-f1cf844b93ba" containerName="ceilometer-notification-agent" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.736982 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="c46769b4-dd47-47eb-ae71-f1cf844b93ba" containerName="ceilometer-central-agent" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.739242 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.742491 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.742773 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.765244 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.799053 4949 scope.go:117] "RemoveContainer" containerID="741ff1e69c385f24184ef7c0fbef1432fecdac23f2944962b210aa063c401a5d" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.850028 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4decc081-75bf-47cd-9335-31ed1f8c761e-log-httpd\") pod \"ceilometer-0\" (UID: \"4decc081-75bf-47cd-9335-31ed1f8c761e\") " pod="openstack/ceilometer-0" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.850580 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cjlkd\" (UniqueName: \"kubernetes.io/projected/4decc081-75bf-47cd-9335-31ed1f8c761e-kube-api-access-cjlkd\") pod \"ceilometer-0\" (UID: \"4decc081-75bf-47cd-9335-31ed1f8c761e\") " pod="openstack/ceilometer-0" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.851227 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4decc081-75bf-47cd-9335-31ed1f8c761e-run-httpd\") pod \"ceilometer-0\" (UID: \"4decc081-75bf-47cd-9335-31ed1f8c761e\") " pod="openstack/ceilometer-0" Feb 16 11:33:32 crc 
kubenswrapper[4949]: I0216 11:33:32.851377 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4decc081-75bf-47cd-9335-31ed1f8c761e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4decc081-75bf-47cd-9335-31ed1f8c761e\") " pod="openstack/ceilometer-0" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.851470 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4decc081-75bf-47cd-9335-31ed1f8c761e-config-data\") pod \"ceilometer-0\" (UID: \"4decc081-75bf-47cd-9335-31ed1f8c761e\") " pod="openstack/ceilometer-0" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.851524 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4decc081-75bf-47cd-9335-31ed1f8c761e-scripts\") pod \"ceilometer-0\" (UID: \"4decc081-75bf-47cd-9335-31ed1f8c761e\") " pod="openstack/ceilometer-0" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.851691 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4decc081-75bf-47cd-9335-31ed1f8c761e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4decc081-75bf-47cd-9335-31ed1f8c761e\") " pod="openstack/ceilometer-0" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.879390 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 16 11:33:32 crc kubenswrapper[4949]: E0216 11:33:32.881226 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[combined-ca-bundle config-data kube-api-access-cjlkd log-httpd run-httpd scripts sg-core-conf-yaml], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/ceilometer-0" podUID="4decc081-75bf-47cd-9335-31ed1f8c761e" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.954664 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4decc081-75bf-47cd-9335-31ed1f8c761e-run-httpd\") pod \"ceilometer-0\" (UID: \"4decc081-75bf-47cd-9335-31ed1f8c761e\") " pod="openstack/ceilometer-0" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.954990 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4decc081-75bf-47cd-9335-31ed1f8c761e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4decc081-75bf-47cd-9335-31ed1f8c761e\") " pod="openstack/ceilometer-0" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.955092 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4decc081-75bf-47cd-9335-31ed1f8c761e-config-data\") pod \"ceilometer-0\" (UID: \"4decc081-75bf-47cd-9335-31ed1f8c761e\") " pod="openstack/ceilometer-0" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.955200 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4decc081-75bf-47cd-9335-31ed1f8c761e-scripts\") pod \"ceilometer-0\" (UID: \"4decc081-75bf-47cd-9335-31ed1f8c761e\") " pod="openstack/ceilometer-0" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.955317 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4decc081-75bf-47cd-9335-31ed1f8c761e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4decc081-75bf-47cd-9335-31ed1f8c761e\") " pod="openstack/ceilometer-0" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.955431 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4decc081-75bf-47cd-9335-31ed1f8c761e-log-httpd\") pod \"ceilometer-0\" (UID: \"4decc081-75bf-47cd-9335-31ed1f8c761e\") " pod="openstack/ceilometer-0" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.955565 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cjlkd\" (UniqueName: \"kubernetes.io/projected/4decc081-75bf-47cd-9335-31ed1f8c761e-kube-api-access-cjlkd\") pod \"ceilometer-0\" (UID: \"4decc081-75bf-47cd-9335-31ed1f8c761e\") " pod="openstack/ceilometer-0" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.955708 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4decc081-75bf-47cd-9335-31ed1f8c761e-log-httpd\") pod \"ceilometer-0\" (UID: \"4decc081-75bf-47cd-9335-31ed1f8c761e\") " pod="openstack/ceilometer-0" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.955361 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4decc081-75bf-47cd-9335-31ed1f8c761e-run-httpd\") pod \"ceilometer-0\" (UID: \"4decc081-75bf-47cd-9335-31ed1f8c761e\") " pod="openstack/ceilometer-0" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.959708 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4decc081-75bf-47cd-9335-31ed1f8c761e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4decc081-75bf-47cd-9335-31ed1f8c761e\") " pod="openstack/ceilometer-0" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.961155 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4decc081-75bf-47cd-9335-31ed1f8c761e-config-data\") pod \"ceilometer-0\" (UID: \"4decc081-75bf-47cd-9335-31ed1f8c761e\") " pod="openstack/ceilometer-0" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.961245 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4decc081-75bf-47cd-9335-31ed1f8c761e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4decc081-75bf-47cd-9335-31ed1f8c761e\") " pod="openstack/ceilometer-0" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.977083 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4decc081-75bf-47cd-9335-31ed1f8c761e-scripts\") pod \"ceilometer-0\" (UID: \"4decc081-75bf-47cd-9335-31ed1f8c761e\") " pod="openstack/ceilometer-0" Feb 16 11:33:32 crc kubenswrapper[4949]: I0216 11:33:32.977924 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cjlkd\" (UniqueName: \"kubernetes.io/projected/4decc081-75bf-47cd-9335-31ed1f8c761e-kube-api-access-cjlkd\") pod \"ceilometer-0\" (UID: \"4decc081-75bf-47cd-9335-31ed1f8c761e\") " pod="openstack/ceilometer-0" Feb 16 11:33:33 crc kubenswrapper[4949]: I0216 11:33:33.255764 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c46769b4-dd47-47eb-ae71-f1cf844b93ba" 
path="/var/lib/kubelet/pods/c46769b4-dd47-47eb-ae71-f1cf844b93ba/volumes" Feb 16 11:33:33 crc kubenswrapper[4949]: I0216 11:33:33.649602 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"9b6adc35-266a-478c-ae17-63a88705e8a9","Type":"ContainerStarted","Data":"57ca1a45d63fc68f8706fc5c09637deb745fe88c4cac25e86952e39227fecd32"} Feb 16 11:33:33 crc kubenswrapper[4949]: I0216 11:33:33.649649 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 16 11:33:33 crc kubenswrapper[4949]: I0216 11:33:33.670004 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 16 11:33:33 crc kubenswrapper[4949]: I0216 11:33:33.788258 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4decc081-75bf-47cd-9335-31ed1f8c761e-run-httpd\") pod \"4decc081-75bf-47cd-9335-31ed1f8c761e\" (UID: \"4decc081-75bf-47cd-9335-31ed1f8c761e\") " Feb 16 11:33:33 crc kubenswrapper[4949]: I0216 11:33:33.788320 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4decc081-75bf-47cd-9335-31ed1f8c761e-combined-ca-bundle\") pod \"4decc081-75bf-47cd-9335-31ed1f8c761e\" (UID: \"4decc081-75bf-47cd-9335-31ed1f8c761e\") " Feb 16 11:33:33 crc kubenswrapper[4949]: I0216 11:33:33.788407 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4decc081-75bf-47cd-9335-31ed1f8c761e-log-httpd\") pod \"4decc081-75bf-47cd-9335-31ed1f8c761e\" (UID: \"4decc081-75bf-47cd-9335-31ed1f8c761e\") " Feb 16 11:33:33 crc kubenswrapper[4949]: I0216 11:33:33.788565 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4decc081-75bf-47cd-9335-31ed1f8c761e-sg-core-conf-yaml\") pod \"4decc081-75bf-47cd-9335-31ed1f8c761e\" (UID: \"4decc081-75bf-47cd-9335-31ed1f8c761e\") " Feb 16 11:33:33 crc kubenswrapper[4949]: I0216 11:33:33.788614 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4decc081-75bf-47cd-9335-31ed1f8c761e-scripts\") pod \"4decc081-75bf-47cd-9335-31ed1f8c761e\" (UID: \"4decc081-75bf-47cd-9335-31ed1f8c761e\") " Feb 16 11:33:33 crc kubenswrapper[4949]: I0216 11:33:33.788777 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cjlkd\" (UniqueName: \"kubernetes.io/projected/4decc081-75bf-47cd-9335-31ed1f8c761e-kube-api-access-cjlkd\") pod \"4decc081-75bf-47cd-9335-31ed1f8c761e\" (UID: \"4decc081-75bf-47cd-9335-31ed1f8c761e\") " Feb 16 11:33:33 crc kubenswrapper[4949]: I0216 11:33:33.788809 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4decc081-75bf-47cd-9335-31ed1f8c761e-config-data\") pod \"4decc081-75bf-47cd-9335-31ed1f8c761e\" (UID: \"4decc081-75bf-47cd-9335-31ed1f8c761e\") " Feb 16 11:33:33 crc kubenswrapper[4949]: I0216 11:33:33.790837 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4decc081-75bf-47cd-9335-31ed1f8c761e-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "4decc081-75bf-47cd-9335-31ed1f8c761e" (UID: "4decc081-75bf-47cd-9335-31ed1f8c761e"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:33:33 crc kubenswrapper[4949]: I0216 11:33:33.790917 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4decc081-75bf-47cd-9335-31ed1f8c761e-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "4decc081-75bf-47cd-9335-31ed1f8c761e" (UID: "4decc081-75bf-47cd-9335-31ed1f8c761e"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:33:33 crc kubenswrapper[4949]: I0216 11:33:33.796554 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4decc081-75bf-47cd-9335-31ed1f8c761e-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "4decc081-75bf-47cd-9335-31ed1f8c761e" (UID: "4decc081-75bf-47cd-9335-31ed1f8c761e"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:33:33 crc kubenswrapper[4949]: I0216 11:33:33.796584 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4decc081-75bf-47cd-9335-31ed1f8c761e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4decc081-75bf-47cd-9335-31ed1f8c761e" (UID: "4decc081-75bf-47cd-9335-31ed1f8c761e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:33:33 crc kubenswrapper[4949]: I0216 11:33:33.797340 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4decc081-75bf-47cd-9335-31ed1f8c761e-kube-api-access-cjlkd" (OuterVolumeSpecName: "kube-api-access-cjlkd") pod "4decc081-75bf-47cd-9335-31ed1f8c761e" (UID: "4decc081-75bf-47cd-9335-31ed1f8c761e"). InnerVolumeSpecName "kube-api-access-cjlkd". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:33:33 crc kubenswrapper[4949]: I0216 11:33:33.797582 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4decc081-75bf-47cd-9335-31ed1f8c761e-scripts" (OuterVolumeSpecName: "scripts") pod "4decc081-75bf-47cd-9335-31ed1f8c761e" (UID: "4decc081-75bf-47cd-9335-31ed1f8c761e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:33:33 crc kubenswrapper[4949]: I0216 11:33:33.798845 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4decc081-75bf-47cd-9335-31ed1f8c761e-config-data" (OuterVolumeSpecName: "config-data") pod "4decc081-75bf-47cd-9335-31ed1f8c761e" (UID: "4decc081-75bf-47cd-9335-31ed1f8c761e"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:33:33 crc kubenswrapper[4949]: I0216 11:33:33.892628 4949 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4decc081-75bf-47cd-9335-31ed1f8c761e-log-httpd\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:33 crc kubenswrapper[4949]: I0216 11:33:33.892671 4949 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4decc081-75bf-47cd-9335-31ed1f8c761e-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:33 crc kubenswrapper[4949]: I0216 11:33:33.892833 4949 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4decc081-75bf-47cd-9335-31ed1f8c761e-scripts\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:33 crc kubenswrapper[4949]: I0216 11:33:33.892854 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cjlkd\" (UniqueName: \"kubernetes.io/projected/4decc081-75bf-47cd-9335-31ed1f8c761e-kube-api-access-cjlkd\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:33 crc kubenswrapper[4949]: I0216 11:33:33.892869 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4decc081-75bf-47cd-9335-31ed1f8c761e-config-data\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:33 crc kubenswrapper[4949]: I0216 11:33:33.892881 4949 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4decc081-75bf-47cd-9335-31ed1f8c761e-run-httpd\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:33 crc kubenswrapper[4949]: I0216 11:33:33.892914 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4decc081-75bf-47cd-9335-31ed1f8c761e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:33 crc kubenswrapper[4949]: I0216 11:33:33.901087 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.391356 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=4.39133307 podStartE2EDuration="4.39133307s" podCreationTimestamp="2026-02-16 11:33:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:33:33.67983118 +0000 UTC m=+1603.309165345" watchObservedRunningTime="2026-02-16 11:33:34.39133307 +0000 UTC m=+1604.020667235" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.392643 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"] Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.397297 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.405573 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.410143 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.413895 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.414750 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.416363 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-f89fs" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.521785 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf-nova-metadata-tls-certs\") pod \"8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf\" (UID: \"8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf\") " Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.521954 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf-logs\") pod \"8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf\" (UID: \"8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf\") " Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.522018 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf-config-data\") pod \"8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf\" (UID: \"8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf\") " Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.522201 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pfp55\" (UniqueName: \"kubernetes.io/projected/8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf-kube-api-access-pfp55\") pod \"8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf\" (UID: \"8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf\") " Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.522362 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf-combined-ca-bundle\") pod \"8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf\" (UID: \"8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf\") " Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.523455 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf-logs" (OuterVolumeSpecName: "logs") pod "8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf" (UID: "8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.523824 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hgdjb\" (UniqueName: \"kubernetes.io/projected/c501b89b-60ac-4275-a573-9324cc865c14-kube-api-access-hgdjb\") pod \"aodh-0\" (UID: \"c501b89b-60ac-4275-a573-9324cc865c14\") " pod="openstack/aodh-0" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.524050 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c501b89b-60ac-4275-a573-9324cc865c14-combined-ca-bundle\") pod \"aodh-0\" (UID: \"c501b89b-60ac-4275-a573-9324cc865c14\") " pod="openstack/aodh-0" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.524252 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c501b89b-60ac-4275-a573-9324cc865c14-config-data\") pod \"aodh-0\" (UID: \"c501b89b-60ac-4275-a573-9324cc865c14\") " pod="openstack/aodh-0" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.524379 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c501b89b-60ac-4275-a573-9324cc865c14-scripts\") pod \"aodh-0\" (UID: \"c501b89b-60ac-4275-a573-9324cc865c14\") " pod="openstack/aodh-0" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.524823 4949 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf-logs\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.532422 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf-kube-api-access-pfp55" (OuterVolumeSpecName: "kube-api-access-pfp55") pod "8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf" (UID: "8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf"). InnerVolumeSpecName "kube-api-access-pfp55". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.551073 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.551374 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.570803 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf" (UID: "8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.593114 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf" (UID: "8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.595015 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf-config-data" (OuterVolumeSpecName: "config-data") pod "8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf" (UID: "8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.626685 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c501b89b-60ac-4275-a573-9324cc865c14-combined-ca-bundle\") pod \"aodh-0\" (UID: \"c501b89b-60ac-4275-a573-9324cc865c14\") " pod="openstack/aodh-0" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.626778 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c501b89b-60ac-4275-a573-9324cc865c14-config-data\") pod \"aodh-0\" (UID: \"c501b89b-60ac-4275-a573-9324cc865c14\") " pod="openstack/aodh-0" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.626846 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c501b89b-60ac-4275-a573-9324cc865c14-scripts\") pod \"aodh-0\" (UID: \"c501b89b-60ac-4275-a573-9324cc865c14\") " pod="openstack/aodh-0" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.627071 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hgdjb\" (UniqueName: \"kubernetes.io/projected/c501b89b-60ac-4275-a573-9324cc865c14-kube-api-access-hgdjb\") pod \"aodh-0\" (UID: \"c501b89b-60ac-4275-a573-9324cc865c14\") " pod="openstack/aodh-0" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.627322 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf-config-data\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.627425 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pfp55\" (UniqueName: \"kubernetes.io/projected/8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf-kube-api-access-pfp55\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.627448 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.627463 4949 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.631393 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/c501b89b-60ac-4275-a573-9324cc865c14-config-data\") pod \"aodh-0\" (UID: \"c501b89b-60ac-4275-a573-9324cc865c14\") " pod="openstack/aodh-0" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.633248 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c501b89b-60ac-4275-a573-9324cc865c14-scripts\") pod \"aodh-0\" (UID: \"c501b89b-60ac-4275-a573-9324cc865c14\") " pod="openstack/aodh-0" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.633302 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c501b89b-60ac-4275-a573-9324cc865c14-combined-ca-bundle\") pod \"aodh-0\" (UID: \"c501b89b-60ac-4275-a573-9324cc865c14\") " pod="openstack/aodh-0" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.644698 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hgdjb\" (UniqueName: \"kubernetes.io/projected/c501b89b-60ac-4275-a573-9324cc865c14-kube-api-access-hgdjb\") pod \"aodh-0\" (UID: \"c501b89b-60ac-4275-a573-9324cc865c14\") " pod="openstack/aodh-0" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.665932 4949 generic.go:334] "Generic (PLEG): container finished" podID="8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf" containerID="80d715caa1f09bc70781d98e854c03a3e9608cd2e2d42fc2bf295a80afbddaec" exitCode=0 Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.666011 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.666052 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf","Type":"ContainerDied","Data":"80d715caa1f09bc70781d98e854c03a3e9608cd2e2d42fc2bf295a80afbddaec"} Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.666105 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf","Type":"ContainerDied","Data":"079855774b3f77b0f372f09c237f58d21c45b2a4bfcf7388fd12d5f307d375e4"} Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.666127 4949 scope.go:117] "RemoveContainer" containerID="80d715caa1f09bc70781d98e854c03a3e9608cd2e2d42fc2bf295a80afbddaec" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.666453 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.713091 4949 scope.go:117] "RemoveContainer" containerID="1b263914b8c93744d4917ebfb3919393a870e44cd516d21490c1f66bfbcaf391" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.748782 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.786498 4949 scope.go:117] "RemoveContainer" containerID="80d715caa1f09bc70781d98e854c03a3e9608cd2e2d42fc2bf295a80afbddaec" Feb 16 11:33:34 crc kubenswrapper[4949]: E0216 11:33:34.790066 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"80d715caa1f09bc70781d98e854c03a3e9608cd2e2d42fc2bf295a80afbddaec\": container with ID starting with 80d715caa1f09bc70781d98e854c03a3e9608cd2e2d42fc2bf295a80afbddaec not found: ID does not exist" containerID="80d715caa1f09bc70781d98e854c03a3e9608cd2e2d42fc2bf295a80afbddaec" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.790111 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"80d715caa1f09bc70781d98e854c03a3e9608cd2e2d42fc2bf295a80afbddaec"} err="failed to get container status \"80d715caa1f09bc70781d98e854c03a3e9608cd2e2d42fc2bf295a80afbddaec\": rpc error: code = NotFound desc = could not find container \"80d715caa1f09bc70781d98e854c03a3e9608cd2e2d42fc2bf295a80afbddaec\": container with ID starting with 80d715caa1f09bc70781d98e854c03a3e9608cd2e2d42fc2bf295a80afbddaec not found: ID does not exist" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.790139 4949 scope.go:117] "RemoveContainer" containerID="1b263914b8c93744d4917ebfb3919393a870e44cd516d21490c1f66bfbcaf391" Feb 16 11:33:34 crc kubenswrapper[4949]: E0216 11:33:34.792093 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1b263914b8c93744d4917ebfb3919393a870e44cd516d21490c1f66bfbcaf391\": container with ID starting with 1b263914b8c93744d4917ebfb3919393a870e44cd516d21490c1f66bfbcaf391 not found: ID does not exist" containerID="1b263914b8c93744d4917ebfb3919393a870e44cd516d21490c1f66bfbcaf391" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.792141 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1b263914b8c93744d4917ebfb3919393a870e44cd516d21490c1f66bfbcaf391"} err="failed to get container status \"1b263914b8c93744d4917ebfb3919393a870e44cd516d21490c1f66bfbcaf391\": rpc error: code = NotFound desc = could not find container \"1b263914b8c93744d4917ebfb3919393a870e44cd516d21490c1f66bfbcaf391\": container with ID starting with 1b263914b8c93744d4917ebfb3919393a870e44cd516d21490c1f66bfbcaf391 not found: ID does not exist" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.811566 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.837902 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.854210 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.867345 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.893079 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Feb 16 11:33:34 crc kubenswrapper[4949]: E0216 11:33:34.893904 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf" containerName="nova-metadata-log" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.893920 4949 
state_mem.go:107] "Deleted CPUSet assignment" podUID="8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf" containerName="nova-metadata-log" Feb 16 11:33:34 crc kubenswrapper[4949]: E0216 11:33:34.893931 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf" containerName="nova-metadata-metadata" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.893937 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf" containerName="nova-metadata-metadata" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.894245 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf" containerName="nova-metadata-metadata" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.894268 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf" containerName="nova-metadata-log" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.897942 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.901536 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.901733 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.909458 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.919810 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.923973 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.924483 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.924889 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-79b5d74c8c-2qhff" Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.936380 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 16 11:33:34 crc kubenswrapper[4949]: I0216 11:33:34.964036 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.046531 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e2a12bce-abf1-4ac0-9d15-6c9ee69eba44-log-httpd\") pod \"ceilometer-0\" (UID: \"e2a12bce-abf1-4ac0-9d15-6c9ee69eba44\") " pod="openstack/ceilometer-0" Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.047354 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d71e65be-6778-41c8-8a34-eda639803764-logs\") pod \"nova-metadata-0\" (UID: \"d71e65be-6778-41c8-8a34-eda639803764\") " pod="openstack/nova-metadata-0" Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.047408 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/d71e65be-6778-41c8-8a34-eda639803764-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"d71e65be-6778-41c8-8a34-eda639803764\") " pod="openstack/nova-metadata-0" Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.047426 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e2a12bce-abf1-4ac0-9d15-6c9ee69eba44-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e2a12bce-abf1-4ac0-9d15-6c9ee69eba44\") " pod="openstack/ceilometer-0" Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.047464 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e2a12bce-abf1-4ac0-9d15-6c9ee69eba44-scripts\") pod \"ceilometer-0\" (UID: \"e2a12bce-abf1-4ac0-9d15-6c9ee69eba44\") " pod="openstack/ceilometer-0" Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.047482 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fd87l\" (UniqueName: \"kubernetes.io/projected/e2a12bce-abf1-4ac0-9d15-6c9ee69eba44-kube-api-access-fd87l\") pod \"ceilometer-0\" (UID: \"e2a12bce-abf1-4ac0-9d15-6c9ee69eba44\") " pod="openstack/ceilometer-0" Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.047545 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/d71e65be-6778-41c8-8a34-eda639803764-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"d71e65be-6778-41c8-8a34-eda639803764\") " pod="openstack/nova-metadata-0" Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.047566 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e2a12bce-abf1-4ac0-9d15-6c9ee69eba44-run-httpd\") pod \"ceilometer-0\" (UID: \"e2a12bce-abf1-4ac0-9d15-6c9ee69eba44\") " pod="openstack/ceilometer-0" Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.047608 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2a12bce-abf1-4ac0-9d15-6c9ee69eba44-config-data\") pod \"ceilometer-0\" (UID: \"e2a12bce-abf1-4ac0-9d15-6c9ee69eba44\") " pod="openstack/ceilometer-0" Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.047714 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2a12bce-abf1-4ac0-9d15-6c9ee69eba44-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e2a12bce-abf1-4ac0-9d15-6c9ee69eba44\") " pod="openstack/ceilometer-0" Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.047805 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d71e65be-6778-41c8-8a34-eda639803764-config-data\") pod \"nova-metadata-0\" (UID: \"d71e65be-6778-41c8-8a34-eda639803764\") " pod="openstack/nova-metadata-0" Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.047838 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rggcj\" (UniqueName: \"kubernetes.io/projected/d71e65be-6778-41c8-8a34-eda639803764-kube-api-access-rggcj\") pod \"nova-metadata-0\" (UID: 
\"d71e65be-6778-41c8-8a34-eda639803764\") " pod="openstack/nova-metadata-0" Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.077789 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5fbc4d444f-4mz4p"] Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.078221 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5fbc4d444f-4mz4p" podUID="679eb150-43d4-444a-8f51-738e92914fde" containerName="dnsmasq-dns" containerID="cri-o://ae3369b1c59205788426355296ba345952b253e703e844a023bd92ef87423def" gracePeriod=10 Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.150333 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rggcj\" (UniqueName: \"kubernetes.io/projected/d71e65be-6778-41c8-8a34-eda639803764-kube-api-access-rggcj\") pod \"nova-metadata-0\" (UID: \"d71e65be-6778-41c8-8a34-eda639803764\") " pod="openstack/nova-metadata-0" Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.150397 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e2a12bce-abf1-4ac0-9d15-6c9ee69eba44-log-httpd\") pod \"ceilometer-0\" (UID: \"e2a12bce-abf1-4ac0-9d15-6c9ee69eba44\") " pod="openstack/ceilometer-0" Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.150433 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d71e65be-6778-41c8-8a34-eda639803764-logs\") pod \"nova-metadata-0\" (UID: \"d71e65be-6778-41c8-8a34-eda639803764\") " pod="openstack/nova-metadata-0" Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.150480 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d71e65be-6778-41c8-8a34-eda639803764-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"d71e65be-6778-41c8-8a34-eda639803764\") " pod="openstack/nova-metadata-0" Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.150498 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e2a12bce-abf1-4ac0-9d15-6c9ee69eba44-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e2a12bce-abf1-4ac0-9d15-6c9ee69eba44\") " pod="openstack/ceilometer-0" Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.150539 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e2a12bce-abf1-4ac0-9d15-6c9ee69eba44-scripts\") pod \"ceilometer-0\" (UID: \"e2a12bce-abf1-4ac0-9d15-6c9ee69eba44\") " pod="openstack/ceilometer-0" Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.150553 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fd87l\" (UniqueName: \"kubernetes.io/projected/e2a12bce-abf1-4ac0-9d15-6c9ee69eba44-kube-api-access-fd87l\") pod \"ceilometer-0\" (UID: \"e2a12bce-abf1-4ac0-9d15-6c9ee69eba44\") " pod="openstack/ceilometer-0" Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.150605 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/d71e65be-6778-41c8-8a34-eda639803764-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"d71e65be-6778-41c8-8a34-eda639803764\") " pod="openstack/nova-metadata-0" Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.150620 4949 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e2a12bce-abf1-4ac0-9d15-6c9ee69eba44-run-httpd\") pod \"ceilometer-0\" (UID: \"e2a12bce-abf1-4ac0-9d15-6c9ee69eba44\") " pod="openstack/ceilometer-0" Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.150656 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2a12bce-abf1-4ac0-9d15-6c9ee69eba44-config-data\") pod \"ceilometer-0\" (UID: \"e2a12bce-abf1-4ac0-9d15-6c9ee69eba44\") " pod="openstack/ceilometer-0" Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.150732 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2a12bce-abf1-4ac0-9d15-6c9ee69eba44-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e2a12bce-abf1-4ac0-9d15-6c9ee69eba44\") " pod="openstack/ceilometer-0" Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.150809 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d71e65be-6778-41c8-8a34-eda639803764-config-data\") pod \"nova-metadata-0\" (UID: \"d71e65be-6778-41c8-8a34-eda639803764\") " pod="openstack/nova-metadata-0" Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.152126 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d71e65be-6778-41c8-8a34-eda639803764-logs\") pod \"nova-metadata-0\" (UID: \"d71e65be-6778-41c8-8a34-eda639803764\") " pod="openstack/nova-metadata-0" Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.152272 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e2a12bce-abf1-4ac0-9d15-6c9ee69eba44-run-httpd\") pod \"ceilometer-0\" (UID: \"e2a12bce-abf1-4ac0-9d15-6c9ee69eba44\") " pod="openstack/ceilometer-0" Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.156741 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e2a12bce-abf1-4ac0-9d15-6c9ee69eba44-log-httpd\") pod \"ceilometer-0\" (UID: \"e2a12bce-abf1-4ac0-9d15-6c9ee69eba44\") " pod="openstack/ceilometer-0" Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.158821 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d71e65be-6778-41c8-8a34-eda639803764-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"d71e65be-6778-41c8-8a34-eda639803764\") " pod="openstack/nova-metadata-0" Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.159115 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e2a12bce-abf1-4ac0-9d15-6c9ee69eba44-scripts\") pod \"ceilometer-0\" (UID: \"e2a12bce-abf1-4ac0-9d15-6c9ee69eba44\") " pod="openstack/ceilometer-0" Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.161958 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2a12bce-abf1-4ac0-9d15-6c9ee69eba44-config-data\") pod \"ceilometer-0\" (UID: \"e2a12bce-abf1-4ac0-9d15-6c9ee69eba44\") " pod="openstack/ceilometer-0" Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.181665 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/d71e65be-6778-41c8-8a34-eda639803764-config-data\") pod \"nova-metadata-0\" (UID: \"d71e65be-6778-41c8-8a34-eda639803764\") " pod="openstack/nova-metadata-0" Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.182699 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e2a12bce-abf1-4ac0-9d15-6c9ee69eba44-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e2a12bce-abf1-4ac0-9d15-6c9ee69eba44\") " pod="openstack/ceilometer-0" Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.183342 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2a12bce-abf1-4ac0-9d15-6c9ee69eba44-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e2a12bce-abf1-4ac0-9d15-6c9ee69eba44\") " pod="openstack/ceilometer-0" Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.183809 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/d71e65be-6778-41c8-8a34-eda639803764-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"d71e65be-6778-41c8-8a34-eda639803764\") " pod="openstack/nova-metadata-0" Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.188803 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rggcj\" (UniqueName: \"kubernetes.io/projected/d71e65be-6778-41c8-8a34-eda639803764-kube-api-access-rggcj\") pod \"nova-metadata-0\" (UID: \"d71e65be-6778-41c8-8a34-eda639803764\") " pod="openstack/nova-metadata-0" Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.209768 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fd87l\" (UniqueName: \"kubernetes.io/projected/e2a12bce-abf1-4ac0-9d15-6c9ee69eba44-kube-api-access-fd87l\") pod \"ceilometer-0\" (UID: \"e2a12bce-abf1-4ac0-9d15-6c9ee69eba44\") " pod="openstack/ceilometer-0" Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.271897 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4decc081-75bf-47cd-9335-31ed1f8c761e" path="/var/lib/kubelet/pods/4decc081-75bf-47cd-9335-31ed1f8c761e/volumes" Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.274158 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf" path="/var/lib/kubelet/pods/8e26e7dd-1ef4-4d9a-a8ed-362c36c8edbf/volumes" Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.276666 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.323075 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.345883 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0"
Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.495429 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"]
Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.708312 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"c501b89b-60ac-4275-a573-9324cc865c14","Type":"ContainerStarted","Data":"e799723a31f3e5ba4736bbc253650473c5e02618cf48a74ebbae5b6d7ac1cc91"}
Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.719962 4949 generic.go:334] "Generic (PLEG): container finished" podID="679eb150-43d4-444a-8f51-738e92914fde" containerID="ae3369b1c59205788426355296ba345952b253e703e844a023bd92ef87423def" exitCode=0
Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.720069 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fbc4d444f-4mz4p" event={"ID":"679eb150-43d4-444a-8f51-738e92914fde","Type":"ContainerDied","Data":"ae3369b1c59205788426355296ba345952b253e703e844a023bd92ef87423def"}
Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.816095 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5fbc4d444f-4mz4p"
Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.979034 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/679eb150-43d4-444a-8f51-738e92914fde-config\") pod \"679eb150-43d4-444a-8f51-738e92914fde\" (UID: \"679eb150-43d4-444a-8f51-738e92914fde\") "
Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.979115 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/679eb150-43d4-444a-8f51-738e92914fde-dns-svc\") pod \"679eb150-43d4-444a-8f51-738e92914fde\" (UID: \"679eb150-43d4-444a-8f51-738e92914fde\") "
Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.979265 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/679eb150-43d4-444a-8f51-738e92914fde-dns-swift-storage-0\") pod \"679eb150-43d4-444a-8f51-738e92914fde\" (UID: \"679eb150-43d4-444a-8f51-738e92914fde\") "
Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.979306 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/679eb150-43d4-444a-8f51-738e92914fde-ovsdbserver-sb\") pod \"679eb150-43d4-444a-8f51-738e92914fde\" (UID: \"679eb150-43d4-444a-8f51-738e92914fde\") "
Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.979834 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/679eb150-43d4-444a-8f51-738e92914fde-ovsdbserver-nb\") pod \"679eb150-43d4-444a-8f51-738e92914fde\" (UID: \"679eb150-43d4-444a-8f51-738e92914fde\") "
Feb 16 11:33:35 crc kubenswrapper[4949]: I0216 11:33:35.979939 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wlvkr\" (UniqueName: \"kubernetes.io/projected/679eb150-43d4-444a-8f51-738e92914fde-kube-api-access-wlvkr\") pod \"679eb150-43d4-444a-8f51-738e92914fde\" (UID: \"679eb150-43d4-444a-8f51-738e92914fde\") "
Feb 16 11:33:36 crc kubenswrapper[4949]: I0216 11:33:36.010235 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/679eb150-43d4-444a-8f51-738e92914fde-kube-api-access-wlvkr" (OuterVolumeSpecName: "kube-api-access-wlvkr") pod "679eb150-43d4-444a-8f51-738e92914fde" (UID: "679eb150-43d4-444a-8f51-738e92914fde"). InnerVolumeSpecName "kube-api-access-wlvkr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Feb 16 11:33:36 crc kubenswrapper[4949]: I0216 11:33:36.054928 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Feb 16 11:33:36 crc kubenswrapper[4949]: I0216 11:33:36.084696 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wlvkr\" (UniqueName: \"kubernetes.io/projected/679eb150-43d4-444a-8f51-738e92914fde-kube-api-access-wlvkr\") on node \"crc\" DevicePath \"\""
Feb 16 11:33:36 crc kubenswrapper[4949]: I0216 11:33:36.145514 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/679eb150-43d4-444a-8f51-738e92914fde-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "679eb150-43d4-444a-8f51-738e92914fde" (UID: "679eb150-43d4-444a-8f51-738e92914fde"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Feb 16 11:33:36 crc kubenswrapper[4949]: I0216 11:33:36.151787 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/679eb150-43d4-444a-8f51-738e92914fde-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "679eb150-43d4-444a-8f51-738e92914fde" (UID: "679eb150-43d4-444a-8f51-738e92914fde"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Feb 16 11:33:36 crc kubenswrapper[4949]: I0216 11:33:36.159871 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/679eb150-43d4-444a-8f51-738e92914fde-config" (OuterVolumeSpecName: "config") pod "679eb150-43d4-444a-8f51-738e92914fde" (UID: "679eb150-43d4-444a-8f51-738e92914fde"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Feb 16 11:33:36 crc kubenswrapper[4949]: I0216 11:33:36.182401 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/679eb150-43d4-444a-8f51-738e92914fde-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "679eb150-43d4-444a-8f51-738e92914fde" (UID: "679eb150-43d4-444a-8f51-738e92914fde"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Feb 16 11:33:36 crc kubenswrapper[4949]: I0216 11:33:36.226833 4949 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/679eb150-43d4-444a-8f51-738e92914fde-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Feb 16 11:33:36 crc kubenswrapper[4949]: I0216 11:33:36.226879 4949 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/679eb150-43d4-444a-8f51-738e92914fde-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Feb 16 11:33:36 crc kubenswrapper[4949]: I0216 11:33:36.227922 4949 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/679eb150-43d4-444a-8f51-738e92914fde-config\") on node \"crc\" DevicePath \"\""
Feb 16 11:33:36 crc kubenswrapper[4949]: I0216 11:33:36.227942 4949 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/679eb150-43d4-444a-8f51-738e92914fde-dns-svc\") on node \"crc\" DevicePath \"\""
Feb 16 11:33:36 crc kubenswrapper[4949]: I0216 11:33:36.245804 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/679eb150-43d4-444a-8f51-738e92914fde-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "679eb150-43d4-444a-8f51-738e92914fde" (UID: "679eb150-43d4-444a-8f51-738e92914fde"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Feb 16 11:33:36 crc kubenswrapper[4949]: I0216 11:33:36.301513 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Feb 16 11:33:36 crc kubenswrapper[4949]: I0216 11:33:36.331557 4949 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/679eb150-43d4-444a-8f51-738e92914fde-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Feb 16 11:33:36 crc kubenswrapper[4949]: E0216 11:33:36.518466 4949 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod91104758_7e02_4761_bc39_fbca029cda0f.slice\": RecentStats: unable to find data in memory cache]"
Feb 16 11:33:36 crc kubenswrapper[4949]: I0216 11:33:36.743632 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fbc4d444f-4mz4p" event={"ID":"679eb150-43d4-444a-8f51-738e92914fde","Type":"ContainerDied","Data":"f20f146d1fd7c13096e2d96649230a04bb1c3c5943ff83408886e1e4bd356465"}
Feb 16 11:33:36 crc kubenswrapper[4949]: I0216 11:33:36.743717 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5fbc4d444f-4mz4p"
Feb 16 11:33:36 crc kubenswrapper[4949]: I0216 11:33:36.744993 4949 scope.go:117] "RemoveContainer" containerID="ae3369b1c59205788426355296ba345952b253e703e844a023bd92ef87423def"
Feb 16 11:33:36 crc kubenswrapper[4949]: I0216 11:33:36.757801 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d71e65be-6778-41c8-8a34-eda639803764","Type":"ContainerStarted","Data":"957d5f117b32011d3f96e5331daff6d25a8f2f625d676f5a798386ac07d29163"}
Feb 16 11:33:36 crc kubenswrapper[4949]: I0216 11:33:36.757855 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d71e65be-6778-41c8-8a34-eda639803764","Type":"ContainerStarted","Data":"840fabca59d475ba7fab292b1bb8d5db13ac403f879d3dcf2ffa75ad8e70f2e4"}
Feb 16 11:33:36 crc kubenswrapper[4949]: I0216 11:33:36.758496 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d71e65be-6778-41c8-8a34-eda639803764","Type":"ContainerStarted","Data":"1cce764f5b49df02d33a145a6e1428349a55332bbdfe9042ed5ec5198be343bb"}
Feb 16 11:33:36 crc kubenswrapper[4949]: I0216 11:33:36.765001 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e2a12bce-abf1-4ac0-9d15-6c9ee69eba44","Type":"ContainerStarted","Data":"c0efdcec1f1b2508d2c7db6ae491d1e256fa82aa523d7bcd53dd5152c8aa0c37"}
Feb 16 11:33:36 crc kubenswrapper[4949]: I0216 11:33:36.768003 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"c501b89b-60ac-4275-a573-9324cc865c14","Type":"ContainerStarted","Data":"77ae50adcc0d1a0e2f90ac7e818e676f9f2caa443f5d04455b8d3e74fe4a096b"}
Feb 16 11:33:36 crc kubenswrapper[4949]: I0216 11:33:36.795024 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.794994726 podStartE2EDuration="2.794994726s" podCreationTimestamp="2026-02-16 11:33:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:33:36.781481909 +0000 UTC m=+1606.410816064" watchObservedRunningTime="2026-02-16 11:33:36.794994726 +0000 UTC m=+1606.424328911"
Feb 16 11:33:36 crc kubenswrapper[4949]: I0216 11:33:36.799720 4949 scope.go:117] "RemoveContainer" containerID="68fe340601768a70e7c6beb6b1215686b60086aa4e5a1eef7f4053cd0aacb042"
Feb 16 11:33:36 crc kubenswrapper[4949]: I0216 11:33:36.832708 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5fbc4d444f-4mz4p"]
Feb 16 11:33:36 crc kubenswrapper[4949]: I0216 11:33:36.860506 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5fbc4d444f-4mz4p"]
Feb 16 11:33:37 crc kubenswrapper[4949]: I0216 11:33:37.251907 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="679eb150-43d4-444a-8f51-738e92914fde" path="/var/lib/kubelet/pods/679eb150-43d4-444a-8f51-738e92914fde/volumes"
Feb 16 11:33:37 crc kubenswrapper[4949]: I0216 11:33:37.804689 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e2a12bce-abf1-4ac0-9d15-6c9ee69eba44","Type":"ContainerStarted","Data":"f24a0f2bc59eb36f04d730f037b9e58b2ef9def5d1ebe5aaaf6f68cc2d0194d6"}
Feb 16 11:33:38 crc kubenswrapper[4949]: I0216 11:33:38.190853 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Feb 16 11:33:38 crc kubenswrapper[4949]: I0216 11:33:38.676228 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"]
Feb 16 11:33:38 crc kubenswrapper[4949]: I0216 11:33:38.900431 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Feb 16 11:33:39 crc kubenswrapper[4949]: I0216 11:33:39.004009 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Feb 16 11:33:39 crc kubenswrapper[4949]: I0216 11:33:39.853455 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"c501b89b-60ac-4275-a573-9324cc865c14","Type":"ContainerStarted","Data":"69df7ae54182679bceaad103c14b20aeba4419f42ac27f254a538d08db822eea"}
Feb 16 11:33:39 crc kubenswrapper[4949]: I0216 11:33:39.858150 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e2a12bce-abf1-4ac0-9d15-6c9ee69eba44","Type":"ContainerStarted","Data":"a88775356f3a23147cb75d68c96cedbe7f10cb56fdf53fe421b59860064861ef"}
Feb 16 11:33:39 crc kubenswrapper[4949]: I0216 11:33:39.907672 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Feb 16 11:33:40 crc kubenswrapper[4949]: I0216 11:33:40.347066 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Feb 16 11:33:40 crc kubenswrapper[4949]: I0216 11:33:40.347425 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Feb 16 11:33:40 crc kubenswrapper[4949]: I0216 11:33:40.874502 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e2a12bce-abf1-4ac0-9d15-6c9ee69eba44","Type":"ContainerStarted","Data":"7587885da613eb87828702ec4671c4f23ed76a43b069eaad103ba7bff78fae06"}
Feb 16 11:33:41 crc kubenswrapper[4949]: I0216 11:33:41.260276 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Feb 16 11:33:41 crc kubenswrapper[4949]: I0216 11:33:41.260316 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Feb 16 11:33:41 crc kubenswrapper[4949]: E0216 11:33:41.782444 4949 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod91104758_7e02_4761_bc39_fbca029cda0f.slice\": RecentStats: unable to find data in memory cache]"
Feb 16 11:33:41 crc kubenswrapper[4949]: I0216 11:33:41.891735 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"c501b89b-60ac-4275-a573-9324cc865c14","Type":"ContainerStarted","Data":"cca2d100a096482e03e52d50436e1239e2707b14c22973e02cb2c6c8524d0a89"}
Feb 16 11:33:41 crc kubenswrapper[4949]: I0216 11:33:41.896225 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e2a12bce-abf1-4ac0-9d15-6c9ee69eba44","Type":"ContainerStarted","Data":"07e6e410926afa36b58cc9f2bb14d95a6f986476539563aaa61921d6073155aa"}
Feb 16 11:33:41 crc kubenswrapper[4949]: I0216 11:33:41.896538 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e2a12bce-abf1-4ac0-9d15-6c9ee69eba44" containerName="ceilometer-central-agent" containerID="cri-o://f24a0f2bc59eb36f04d730f037b9e58b2ef9def5d1ebe5aaaf6f68cc2d0194d6" gracePeriod=30
Feb 16 11:33:41 crc kubenswrapper[4949]: I0216 11:33:41.896763 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Feb 16 11:33:41 crc kubenswrapper[4949]: I0216 11:33:41.897432 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e2a12bce-abf1-4ac0-9d15-6c9ee69eba44" containerName="proxy-httpd" containerID="cri-o://07e6e410926afa36b58cc9f2bb14d95a6f986476539563aaa61921d6073155aa" gracePeriod=30
Feb 16 11:33:41 crc kubenswrapper[4949]: I0216 11:33:41.897561 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e2a12bce-abf1-4ac0-9d15-6c9ee69eba44" containerName="sg-core" containerID="cri-o://7587885da613eb87828702ec4671c4f23ed76a43b069eaad103ba7bff78fae06" gracePeriod=30
Feb 16 11:33:41 crc kubenswrapper[4949]: I0216 11:33:41.897568 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e2a12bce-abf1-4ac0-9d15-6c9ee69eba44" containerName="ceilometer-notification-agent" containerID="cri-o://a88775356f3a23147cb75d68c96cedbe7f10cb56fdf53fe421b59860064861ef" gracePeriod=30
Feb 16 11:33:41 crc kubenswrapper[4949]: I0216 11:33:41.933229 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.01047102 podStartE2EDuration="7.933162091s" podCreationTimestamp="2026-02-16 11:33:34 +0000 UTC" firstStartedPulling="2026-02-16 11:33:36.289306504 +0000 UTC m=+1605.918640669" lastFinishedPulling="2026-02-16 11:33:41.211997575 +0000 UTC m=+1610.841331740" observedRunningTime="2026-02-16 11:33:41.929038233 +0000 UTC m=+1611.558372398" watchObservedRunningTime="2026-02-16 11:33:41.933162091 +0000 UTC m=+1611.562496256"
Feb 16 11:33:42 crc kubenswrapper[4949]: I0216 11:33:42.251456 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="9b6adc35-266a-478c-ae17-63a88705e8a9" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.254:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Feb 16 11:33:42 crc kubenswrapper[4949]: I0216 11:33:42.252119 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="9b6adc35-266a-478c-ae17-63a88705e8a9" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.254:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Feb 16 11:33:42 crc kubenswrapper[4949]: I0216 11:33:42.919324 4949 generic.go:334] "Generic (PLEG): container finished" podID="5a95f751-76e3-4289-abf1-0328e14e6ac8" containerID="e387f4a5f489eda015be94c35737cf691a286fb27aac46cd361b9541080f2962" exitCode=137
Feb 16 11:33:42 crc kubenswrapper[4949]: I0216 11:33:42.919675 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"5a95f751-76e3-4289-abf1-0328e14e6ac8","Type":"ContainerDied","Data":"e387f4a5f489eda015be94c35737cf691a286fb27aac46cd361b9541080f2962"}
Feb 16 11:33:42 crc kubenswrapper[4949]: I0216 11:33:42.927060 4949 generic.go:334] "Generic (PLEG): container finished" podID="e2a12bce-abf1-4ac0-9d15-6c9ee69eba44" containerID="07e6e410926afa36b58cc9f2bb14d95a6f986476539563aaa61921d6073155aa" exitCode=0
Feb 16 11:33:42 crc kubenswrapper[4949]: I0216 11:33:42.927128 4949 generic.go:334] "Generic (PLEG): container finished" podID="e2a12bce-abf1-4ac0-9d15-6c9ee69eba44" containerID="7587885da613eb87828702ec4671c4f23ed76a43b069eaad103ba7bff78fae06" exitCode=2
Feb 16 11:33:42 crc kubenswrapper[4949]: I0216 11:33:42.927139 4949 generic.go:334] "Generic (PLEG): container finished" podID="e2a12bce-abf1-4ac0-9d15-6c9ee69eba44" containerID="a88775356f3a23147cb75d68c96cedbe7f10cb56fdf53fe421b59860064861ef" exitCode=0
Feb 16 11:33:42 crc kubenswrapper[4949]: I0216 11:33:42.927182 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e2a12bce-abf1-4ac0-9d15-6c9ee69eba44","Type":"ContainerDied","Data":"07e6e410926afa36b58cc9f2bb14d95a6f986476539563aaa61921d6073155aa"}
Feb 16 11:33:42 crc kubenswrapper[4949]: I0216 11:33:42.927219 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e2a12bce-abf1-4ac0-9d15-6c9ee69eba44","Type":"ContainerDied","Data":"7587885da613eb87828702ec4671c4f23ed76a43b069eaad103ba7bff78fae06"}
Feb 16 11:33:42 crc kubenswrapper[4949]: I0216 11:33:42.927232 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e2a12bce-abf1-4ac0-9d15-6c9ee69eba44","Type":"ContainerDied","Data":"a88775356f3a23147cb75d68c96cedbe7f10cb56fdf53fe421b59860064861ef"}
Feb 16 11:33:43 crc kubenswrapper[4949]: I0216 11:33:43.288754 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Feb 16 11:33:43 crc kubenswrapper[4949]: I0216 11:33:43.357122 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a95f751-76e3-4289-abf1-0328e14e6ac8-combined-ca-bundle\") pod \"5a95f751-76e3-4289-abf1-0328e14e6ac8\" (UID: \"5a95f751-76e3-4289-abf1-0328e14e6ac8\") "
Feb 16 11:33:43 crc kubenswrapper[4949]: I0216 11:33:43.357380 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-99c5p\" (UniqueName: \"kubernetes.io/projected/5a95f751-76e3-4289-abf1-0328e14e6ac8-kube-api-access-99c5p\") pod \"5a95f751-76e3-4289-abf1-0328e14e6ac8\" (UID: \"5a95f751-76e3-4289-abf1-0328e14e6ac8\") "
Feb 16 11:33:43 crc kubenswrapper[4949]: I0216 11:33:43.358330 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a95f751-76e3-4289-abf1-0328e14e6ac8-config-data\") pod \"5a95f751-76e3-4289-abf1-0328e14e6ac8\" (UID: \"5a95f751-76e3-4289-abf1-0328e14e6ac8\") "
Feb 16 11:33:43 crc kubenswrapper[4949]: I0216 11:33:43.367405 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5a95f751-76e3-4289-abf1-0328e14e6ac8-kube-api-access-99c5p" (OuterVolumeSpecName: "kube-api-access-99c5p") pod "5a95f751-76e3-4289-abf1-0328e14e6ac8" (UID: "5a95f751-76e3-4289-abf1-0328e14e6ac8"). InnerVolumeSpecName "kube-api-access-99c5p". PluginName "kubernetes.io/projected", VolumeGidValue ""
Feb 16 11:33:43 crc kubenswrapper[4949]: I0216 11:33:43.405446 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a95f751-76e3-4289-abf1-0328e14e6ac8-config-data" (OuterVolumeSpecName: "config-data") pod "5a95f751-76e3-4289-abf1-0328e14e6ac8" (UID: "5a95f751-76e3-4289-abf1-0328e14e6ac8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Feb 16 11:33:43 crc kubenswrapper[4949]: I0216 11:33:43.410813 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a95f751-76e3-4289-abf1-0328e14e6ac8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5a95f751-76e3-4289-abf1-0328e14e6ac8" (UID: "5a95f751-76e3-4289-abf1-0328e14e6ac8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Feb 16 11:33:43 crc kubenswrapper[4949]: I0216 11:33:43.462536 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a95f751-76e3-4289-abf1-0328e14e6ac8-config-data\") on node \"crc\" DevicePath \"\""
Feb 16 11:33:43 crc kubenswrapper[4949]: I0216 11:33:43.462586 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a95f751-76e3-4289-abf1-0328e14e6ac8-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Feb 16 11:33:43 crc kubenswrapper[4949]: I0216 11:33:43.462606 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-99c5p\" (UniqueName: \"kubernetes.io/projected/5a95f751-76e3-4289-abf1-0328e14e6ac8-kube-api-access-99c5p\") on node \"crc\" DevicePath \"\""
Feb 16 11:33:43 crc kubenswrapper[4949]: I0216 11:33:43.943255 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"c501b89b-60ac-4275-a573-9324cc865c14","Type":"ContainerStarted","Data":"6323305bdbf51c6e4bffe689102365d8f2dacc9da2e1d41a6a9c8c8286525c89"}
Feb 16 11:33:43 crc kubenswrapper[4949]: I0216 11:33:43.943724 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="c501b89b-60ac-4275-a573-9324cc865c14" containerName="aodh-api" containerID="cri-o://77ae50adcc0d1a0e2f90ac7e818e676f9f2caa443f5d04455b8d3e74fe4a096b" gracePeriod=30
Feb 16 11:33:43 crc kubenswrapper[4949]: I0216 11:33:43.944494 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="c501b89b-60ac-4275-a573-9324cc865c14" containerName="aodh-listener" containerID="cri-o://6323305bdbf51c6e4bffe689102365d8f2dacc9da2e1d41a6a9c8c8286525c89" gracePeriod=30
Feb 16 11:33:43 crc kubenswrapper[4949]: I0216 11:33:43.944562 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="c501b89b-60ac-4275-a573-9324cc865c14" containerName="aodh-notifier" containerID="cri-o://cca2d100a096482e03e52d50436e1239e2707b14c22973e02cb2c6c8524d0a89" gracePeriod=30
Feb 16 11:33:43 crc kubenswrapper[4949]: I0216 11:33:43.944629 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="c501b89b-60ac-4275-a573-9324cc865c14" containerName="aodh-evaluator" containerID="cri-o://69df7ae54182679bceaad103c14b20aeba4419f42ac27f254a538d08db822eea" gracePeriod=30
Feb 16 11:33:43 crc kubenswrapper[4949]: I0216 11:33:43.950329 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"5a95f751-76e3-4289-abf1-0328e14e6ac8","Type":"ContainerDied","Data":"f7d0bdd8536a8b3472848e665be42b8d75b79433beb8888d5796e789bb0f80ae"}
Feb 16 11:33:43 crc kubenswrapper[4949]: I0216 11:33:43.950390 4949 scope.go:117] "RemoveContainer" containerID="e387f4a5f489eda015be94c35737cf691a286fb27aac46cd361b9541080f2962"
Feb 16 11:33:43 crc kubenswrapper[4949]: I0216 11:33:43.950537 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Feb 16 11:33:43 crc kubenswrapper[4949]: I0216 11:33:43.968439 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=2.699648169 podStartE2EDuration="9.968417821s" podCreationTimestamp="2026-02-16 11:33:34 +0000 UTC" firstStartedPulling="2026-02-16 11:33:35.53431918 +0000 UTC m=+1605.163653335" lastFinishedPulling="2026-02-16 11:33:42.803088812 +0000 UTC m=+1612.432422987" observedRunningTime="2026-02-16 11:33:43.966836555 +0000 UTC m=+1613.596170720" watchObservedRunningTime="2026-02-16 11:33:43.968417821 +0000 UTC m=+1613.597751986"
Feb 16 11:33:44 crc kubenswrapper[4949]: I0216 11:33:44.037769 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Feb 16 11:33:44 crc kubenswrapper[4949]: I0216 11:33:44.058885 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Feb 16 11:33:44 crc kubenswrapper[4949]: I0216 11:33:44.076625 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Feb 16 11:33:44 crc kubenswrapper[4949]: E0216 11:33:44.077274 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="679eb150-43d4-444a-8f51-738e92914fde" containerName="dnsmasq-dns"
Feb 16 11:33:44 crc kubenswrapper[4949]: I0216 11:33:44.077292 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="679eb150-43d4-444a-8f51-738e92914fde" containerName="dnsmasq-dns"
Feb 16 11:33:44 crc kubenswrapper[4949]: E0216 11:33:44.077330 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="679eb150-43d4-444a-8f51-738e92914fde" containerName="init"
Feb 16 11:33:44 crc kubenswrapper[4949]: I0216 11:33:44.077344 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="679eb150-43d4-444a-8f51-738e92914fde" containerName="init"
Feb 16 11:33:44 crc kubenswrapper[4949]: E0216 11:33:44.077369 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a95f751-76e3-4289-abf1-0328e14e6ac8" containerName="nova-cell1-novncproxy-novncproxy"
Feb 16 11:33:44 crc kubenswrapper[4949]: I0216 11:33:44.077376 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a95f751-76e3-4289-abf1-0328e14e6ac8" containerName="nova-cell1-novncproxy-novncproxy"
Feb 16 11:33:44 crc kubenswrapper[4949]: I0216 11:33:44.077631 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="679eb150-43d4-444a-8f51-738e92914fde" containerName="dnsmasq-dns"
Feb 16 11:33:44 crc kubenswrapper[4949]: I0216 11:33:44.077648 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="5a95f751-76e3-4289-abf1-0328e14e6ac8" containerName="nova-cell1-novncproxy-novncproxy"
Feb 16 11:33:44 crc kubenswrapper[4949]: I0216 11:33:44.078644 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Feb 16 11:33:44 crc kubenswrapper[4949]: I0216 11:33:44.083353 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc"
Feb 16 11:33:44 crc kubenswrapper[4949]: I0216 11:33:44.083479 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data"
Feb 16 11:33:44 crc kubenswrapper[4949]: I0216 11:33:44.083553 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt"
Feb 16 11:33:44 crc kubenswrapper[4949]: I0216 11:33:44.101977 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Feb 16 11:33:44 crc kubenswrapper[4949]: I0216 11:33:44.180922 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e7103704-b3b4-4de6-8ca7-b70b44b10cd6-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"e7103704-b3b4-4de6-8ca7-b70b44b10cd6\") " pod="openstack/nova-cell1-novncproxy-0"
Feb 16 11:33:44 crc kubenswrapper[4949]: I0216 11:33:44.188223 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/e7103704-b3b4-4de6-8ca7-b70b44b10cd6-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"e7103704-b3b4-4de6-8ca7-b70b44b10cd6\") " pod="openstack/nova-cell1-novncproxy-0"
Feb 16 11:33:44 crc kubenswrapper[4949]: I0216 11:33:44.188287 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7103704-b3b4-4de6-8ca7-b70b44b10cd6-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"e7103704-b3b4-4de6-8ca7-b70b44b10cd6\") " pod="openstack/nova-cell1-novncproxy-0"
Feb 16 11:33:44 crc kubenswrapper[4949]: I0216 11:33:44.188436 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/e7103704-b3b4-4de6-8ca7-b70b44b10cd6-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"e7103704-b3b4-4de6-8ca7-b70b44b10cd6\") " pod="openstack/nova-cell1-novncproxy-0"
Feb 16 11:33:44 crc kubenswrapper[4949]: I0216 11:33:44.188473 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-clg7g\" (UniqueName: \"kubernetes.io/projected/e7103704-b3b4-4de6-8ca7-b70b44b10cd6-kube-api-access-clg7g\") pod \"nova-cell1-novncproxy-0\" (UID: \"e7103704-b3b4-4de6-8ca7-b70b44b10cd6\") " pod="openstack/nova-cell1-novncproxy-0"
Feb 16 11:33:44 crc kubenswrapper[4949]: I0216 11:33:44.296459 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/e7103704-b3b4-4de6-8ca7-b70b44b10cd6-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"e7103704-b3b4-4de6-8ca7-b70b44b10cd6\") " pod="openstack/nova-cell1-novncproxy-0"
Feb 16 11:33:44 crc kubenswrapper[4949]: I0216 11:33:44.296497 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7103704-b3b4-4de6-8ca7-b70b44b10cd6-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"e7103704-b3b4-4de6-8ca7-b70b44b10cd6\") " pod="openstack/nova-cell1-novncproxy-0"
Feb 16 11:33:44 crc kubenswrapper[4949]: I0216 11:33:44.296521 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/e7103704-b3b4-4de6-8ca7-b70b44b10cd6-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"e7103704-b3b4-4de6-8ca7-b70b44b10cd6\") " pod="openstack/nova-cell1-novncproxy-0"
Feb 16 11:33:44 crc kubenswrapper[4949]: I0216 11:33:44.296544 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-clg7g\" (UniqueName: \"kubernetes.io/projected/e7103704-b3b4-4de6-8ca7-b70b44b10cd6-kube-api-access-clg7g\") pod \"nova-cell1-novncproxy-0\" (UID: \"e7103704-b3b4-4de6-8ca7-b70b44b10cd6\") " pod="openstack/nova-cell1-novncproxy-0"
Feb 16 11:33:44 crc kubenswrapper[4949]: I0216 11:33:44.296646 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e7103704-b3b4-4de6-8ca7-b70b44b10cd6-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"e7103704-b3b4-4de6-8ca7-b70b44b10cd6\") " pod="openstack/nova-cell1-novncproxy-0"
Feb 16 11:33:44 crc kubenswrapper[4949]: I0216 11:33:44.306192 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e7103704-b3b4-4de6-8ca7-b70b44b10cd6-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"e7103704-b3b4-4de6-8ca7-b70b44b10cd6\") " pod="openstack/nova-cell1-novncproxy-0"
Feb 16 11:33:44 crc kubenswrapper[4949]: I0216 11:33:44.312121 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7103704-b3b4-4de6-8ca7-b70b44b10cd6-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"e7103704-b3b4-4de6-8ca7-b70b44b10cd6\") " pod="openstack/nova-cell1-novncproxy-0"
Feb 16 11:33:44 crc kubenswrapper[4949]: I0216 11:33:44.317991 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/e7103704-b3b4-4de6-8ca7-b70b44b10cd6-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"e7103704-b3b4-4de6-8ca7-b70b44b10cd6\") " pod="openstack/nova-cell1-novncproxy-0"
Feb 16 11:33:44 crc kubenswrapper[4949]: I0216 11:33:44.319850 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-clg7g\" (UniqueName: \"kubernetes.io/projected/e7103704-b3b4-4de6-8ca7-b70b44b10cd6-kube-api-access-clg7g\") pod \"nova-cell1-novncproxy-0\" (UID: \"e7103704-b3b4-4de6-8ca7-b70b44b10cd6\") " pod="openstack/nova-cell1-novncproxy-0"
Feb 16 11:33:44 crc kubenswrapper[4949]: I0216 11:33:44.330417 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/e7103704-b3b4-4de6-8ca7-b70b44b10cd6-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"e7103704-b3b4-4de6-8ca7-b70b44b10cd6\") " pod="openstack/nova-cell1-novncproxy-0"
Feb 16 11:33:44 crc kubenswrapper[4949]: I0216 11:33:44.414883 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Feb 16 11:33:44 crc kubenswrapper[4949]: I0216 11:33:44.924760 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Feb 16 11:33:44 crc kubenswrapper[4949]: I0216 11:33:44.964551 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"e7103704-b3b4-4de6-8ca7-b70b44b10cd6","Type":"ContainerStarted","Data":"844423bb6984b79f17a512964743f997b4ba4f536b2da28bc37d09bd02ed3288"}
Feb 16 11:33:44 crc kubenswrapper[4949]: I0216 11:33:44.967757 4949 generic.go:334] "Generic (PLEG): container finished" podID="c501b89b-60ac-4275-a573-9324cc865c14" containerID="cca2d100a096482e03e52d50436e1239e2707b14c22973e02cb2c6c8524d0a89" exitCode=0
Feb 16 11:33:44 crc kubenswrapper[4949]: I0216 11:33:44.967788 4949 generic.go:334] "Generic (PLEG): container finished" podID="c501b89b-60ac-4275-a573-9324cc865c14" containerID="69df7ae54182679bceaad103c14b20aeba4419f42ac27f254a538d08db822eea" exitCode=0
Feb 16 11:33:44 crc kubenswrapper[4949]: I0216 11:33:44.967800 4949 generic.go:334] "Generic (PLEG): container finished" podID="c501b89b-60ac-4275-a573-9324cc865c14" containerID="77ae50adcc0d1a0e2f90ac7e818e676f9f2caa443f5d04455b8d3e74fe4a096b" exitCode=0
Feb 16 11:33:44 crc kubenswrapper[4949]: I0216 11:33:44.967820 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"c501b89b-60ac-4275-a573-9324cc865c14","Type":"ContainerDied","Data":"cca2d100a096482e03e52d50436e1239e2707b14c22973e02cb2c6c8524d0a89"}
Feb 16 11:33:44 crc kubenswrapper[4949]: I0216 11:33:44.967841 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"c501b89b-60ac-4275-a573-9324cc865c14","Type":"ContainerDied","Data":"69df7ae54182679bceaad103c14b20aeba4419f42ac27f254a538d08db822eea"}
Feb 16 11:33:44 crc kubenswrapper[4949]: I0216 11:33:44.967852 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"c501b89b-60ac-4275-a573-9324cc865c14","Type":"ContainerDied","Data":"77ae50adcc0d1a0e2f90ac7e818e676f9f2caa443f5d04455b8d3e74fe4a096b"}
Feb 16 11:33:45 crc kubenswrapper[4949]: I0216 11:33:45.248727 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5a95f751-76e3-4289-abf1-0328e14e6ac8" path="/var/lib/kubelet/pods/5a95f751-76e3-4289-abf1-0328e14e6ac8/volumes"
Feb 16 11:33:45 crc kubenswrapper[4949]: I0216 11:33:45.347474 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Feb 16 11:33:45 crc kubenswrapper[4949]: I0216 11:33:45.347513 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Feb 16 11:33:45 crc kubenswrapper[4949]: I0216 11:33:45.854780 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Feb 16 11:33:45 crc kubenswrapper[4949]: I0216 11:33:45.957067 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e2a12bce-abf1-4ac0-9d15-6c9ee69eba44-scripts\") pod \"e2a12bce-abf1-4ac0-9d15-6c9ee69eba44\" (UID: \"e2a12bce-abf1-4ac0-9d15-6c9ee69eba44\") "
Feb 16 11:33:45 crc kubenswrapper[4949]: I0216 11:33:45.957419 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fd87l\" (UniqueName: \"kubernetes.io/projected/e2a12bce-abf1-4ac0-9d15-6c9ee69eba44-kube-api-access-fd87l\") pod \"e2a12bce-abf1-4ac0-9d15-6c9ee69eba44\" (UID: \"e2a12bce-abf1-4ac0-9d15-6c9ee69eba44\") "
Feb 16 11:33:45 crc kubenswrapper[4949]: I0216 11:33:45.958397 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e2a12bce-abf1-4ac0-9d15-6c9ee69eba44-log-httpd\") pod \"e2a12bce-abf1-4ac0-9d15-6c9ee69eba44\" (UID: \"e2a12bce-abf1-4ac0-9d15-6c9ee69eba44\") "
Feb 16 11:33:45 crc kubenswrapper[4949]: I0216 11:33:45.958648 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2a12bce-abf1-4ac0-9d15-6c9ee69eba44-combined-ca-bundle\") pod \"e2a12bce-abf1-4ac0-9d15-6c9ee69eba44\" (UID: \"e2a12bce-abf1-4ac0-9d15-6c9ee69eba44\") "
Feb 16 11:33:45 crc kubenswrapper[4949]: I0216 11:33:45.959033 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2a12bce-abf1-4ac0-9d15-6c9ee69eba44-config-data\") pod \"e2a12bce-abf1-4ac0-9d15-6c9ee69eba44\" (UID: \"e2a12bce-abf1-4ac0-9d15-6c9ee69eba44\") "
Feb 16 11:33:45 crc kubenswrapper[4949]: I0216 11:33:45.959313 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e2a12bce-abf1-4ac0-9d15-6c9ee69eba44-sg-core-conf-yaml\") pod \"e2a12bce-abf1-4ac0-9d15-6c9ee69eba44\" (UID: \"e2a12bce-abf1-4ac0-9d15-6c9ee69eba44\") "
Feb 16 11:33:45 crc kubenswrapper[4949]: I0216 11:33:45.959453 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e2a12bce-abf1-4ac0-9d15-6c9ee69eba44-run-httpd\") pod \"e2a12bce-abf1-4ac0-9d15-6c9ee69eba44\" (UID: \"e2a12bce-abf1-4ac0-9d15-6c9ee69eba44\") "
Feb 16 11:33:45 crc kubenswrapper[4949]: I0216 11:33:45.960040 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e2a12bce-abf1-4ac0-9d15-6c9ee69eba44-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "e2a12bce-abf1-4ac0-9d15-6c9ee69eba44" (UID: "e2a12bce-abf1-4ac0-9d15-6c9ee69eba44"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Feb 16 11:33:45 crc kubenswrapper[4949]: I0216 11:33:45.960478 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e2a12bce-abf1-4ac0-9d15-6c9ee69eba44-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "e2a12bce-abf1-4ac0-9d15-6c9ee69eba44" (UID: "e2a12bce-abf1-4ac0-9d15-6c9ee69eba44"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Feb 16 11:33:45 crc kubenswrapper[4949]: I0216 11:33:45.961090 4949 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e2a12bce-abf1-4ac0-9d15-6c9ee69eba44-log-httpd\") on node \"crc\" DevicePath \"\""
Feb 16 11:33:45 crc kubenswrapper[4949]: I0216 11:33:45.961433 4949 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e2a12bce-abf1-4ac0-9d15-6c9ee69eba44-run-httpd\") on node \"crc\" DevicePath \"\""
Feb 16 11:33:45 crc kubenswrapper[4949]: I0216 11:33:45.966095 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e2a12bce-abf1-4ac0-9d15-6c9ee69eba44-kube-api-access-fd87l" (OuterVolumeSpecName: "kube-api-access-fd87l") pod "e2a12bce-abf1-4ac0-9d15-6c9ee69eba44" (UID: "e2a12bce-abf1-4ac0-9d15-6c9ee69eba44"). InnerVolumeSpecName "kube-api-access-fd87l". PluginName "kubernetes.io/projected", VolumeGidValue ""
Feb 16 11:33:45 crc kubenswrapper[4949]: I0216 11:33:45.966153 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e2a12bce-abf1-4ac0-9d15-6c9ee69eba44-scripts" (OuterVolumeSpecName: "scripts") pod "e2a12bce-abf1-4ac0-9d15-6c9ee69eba44" (UID: "e2a12bce-abf1-4ac0-9d15-6c9ee69eba44"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.016472 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"e7103704-b3b4-4de6-8ca7-b70b44b10cd6","Type":"ContainerStarted","Data":"fd91195cb573da4ab20c7b8ebe16e38b76995b1ea6280fb11630b3c1422f6bf7"}
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.021542 4949 generic.go:334] "Generic (PLEG): container finished" podID="e2a12bce-abf1-4ac0-9d15-6c9ee69eba44" containerID="f24a0f2bc59eb36f04d730f037b9e58b2ef9def5d1ebe5aaaf6f68cc2d0194d6" exitCode=0
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.021611 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e2a12bce-abf1-4ac0-9d15-6c9ee69eba44","Type":"ContainerDied","Data":"f24a0f2bc59eb36f04d730f037b9e58b2ef9def5d1ebe5aaaf6f68cc2d0194d6"}
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.021658 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e2a12bce-abf1-4ac0-9d15-6c9ee69eba44","Type":"ContainerDied","Data":"c0efdcec1f1b2508d2c7db6ae491d1e256fa82aa523d7bcd53dd5152c8aa0c37"}
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.021683 4949 scope.go:117] "RemoveContainer" containerID="07e6e410926afa36b58cc9f2bb14d95a6f986476539563aaa61921d6073155aa"
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.021929 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.042867 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.042841731 podStartE2EDuration="2.042841731s" podCreationTimestamp="2026-02-16 11:33:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:33:46.037470088 +0000 UTC m=+1615.666804263" watchObservedRunningTime="2026-02-16 11:33:46.042841731 +0000 UTC m=+1615.672175916"
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.051028 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e2a12bce-abf1-4ac0-9d15-6c9ee69eba44-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "e2a12bce-abf1-4ac0-9d15-6c9ee69eba44" (UID: "e2a12bce-abf1-4ac0-9d15-6c9ee69eba44"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.064236 4949 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e2a12bce-abf1-4ac0-9d15-6c9ee69eba44-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.064264 4949 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e2a12bce-abf1-4ac0-9d15-6c9ee69eba44-scripts\") on node \"crc\" DevicePath \"\""
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.064278 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fd87l\" (UniqueName: \"kubernetes.io/projected/e2a12bce-abf1-4ac0-9d15-6c9ee69eba44-kube-api-access-fd87l\") on node \"crc\" DevicePath \"\""
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.135069 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e2a12bce-abf1-4ac0-9d15-6c9ee69eba44-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e2a12bce-abf1-4ac0-9d15-6c9ee69eba44" (UID: "e2a12bce-abf1-4ac0-9d15-6c9ee69eba44"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.161141 4949 scope.go:117] "RemoveContainer" containerID="7587885da613eb87828702ec4671c4f23ed76a43b069eaad103ba7bff78fae06"
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.166873 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2a12bce-abf1-4ac0-9d15-6c9ee69eba44-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.205624 4949 scope.go:117] "RemoveContainer" containerID="a88775356f3a23147cb75d68c96cedbe7f10cb56fdf53fe421b59860064861ef"
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.217330 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e2a12bce-abf1-4ac0-9d15-6c9ee69eba44-config-data" (OuterVolumeSpecName: "config-data") pod "e2a12bce-abf1-4ac0-9d15-6c9ee69eba44" (UID: "e2a12bce-abf1-4ac0-9d15-6c9ee69eba44"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.231934 4949 scope.go:117] "RemoveContainer" containerID="f24a0f2bc59eb36f04d730f037b9e58b2ef9def5d1ebe5aaaf6f68cc2d0194d6"
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.252341 4949 scope.go:117] "RemoveContainer" containerID="07e6e410926afa36b58cc9f2bb14d95a6f986476539563aaa61921d6073155aa"
Feb 16 11:33:46 crc kubenswrapper[4949]: E0216 11:33:46.252836 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"07e6e410926afa36b58cc9f2bb14d95a6f986476539563aaa61921d6073155aa\": container with ID starting with 07e6e410926afa36b58cc9f2bb14d95a6f986476539563aaa61921d6073155aa not found: ID does not exist" containerID="07e6e410926afa36b58cc9f2bb14d95a6f986476539563aaa61921d6073155aa"
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.252872 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"07e6e410926afa36b58cc9f2bb14d95a6f986476539563aaa61921d6073155aa"} err="failed to get container status \"07e6e410926afa36b58cc9f2bb14d95a6f986476539563aaa61921d6073155aa\": rpc error: code = NotFound desc = could not find container \"07e6e410926afa36b58cc9f2bb14d95a6f986476539563aaa61921d6073155aa\": container with ID starting with 07e6e410926afa36b58cc9f2bb14d95a6f986476539563aaa61921d6073155aa not found: ID does not exist"
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.252896 4949 scope.go:117] "RemoveContainer" containerID="7587885da613eb87828702ec4671c4f23ed76a43b069eaad103ba7bff78fae06"
Feb 16 11:33:46 crc kubenswrapper[4949]: E0216 11:33:46.253217 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7587885da613eb87828702ec4671c4f23ed76a43b069eaad103ba7bff78fae06\": container with ID starting with 7587885da613eb87828702ec4671c4f23ed76a43b069eaad103ba7bff78fae06 not found: ID does not exist" containerID="7587885da613eb87828702ec4671c4f23ed76a43b069eaad103ba7bff78fae06"
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.253239 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7587885da613eb87828702ec4671c4f23ed76a43b069eaad103ba7bff78fae06"} err="failed to get container status \"7587885da613eb87828702ec4671c4f23ed76a43b069eaad103ba7bff78fae06\": rpc error: code = NotFound desc = could not find container \"7587885da613eb87828702ec4671c4f23ed76a43b069eaad103ba7bff78fae06\": container with ID starting with 7587885da613eb87828702ec4671c4f23ed76a43b069eaad103ba7bff78fae06 not found: ID does not exist"
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.253252 4949 scope.go:117] "RemoveContainer" containerID="a88775356f3a23147cb75d68c96cedbe7f10cb56fdf53fe421b59860064861ef"
Feb 16 11:33:46 crc kubenswrapper[4949]: E0216 11:33:46.253467 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a88775356f3a23147cb75d68c96cedbe7f10cb56fdf53fe421b59860064861ef\": container with ID starting with a88775356f3a23147cb75d68c96cedbe7f10cb56fdf53fe421b59860064861ef not found: ID does not exist" containerID="a88775356f3a23147cb75d68c96cedbe7f10cb56fdf53fe421b59860064861ef"
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.253487 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a88775356f3a23147cb75d68c96cedbe7f10cb56fdf53fe421b59860064861ef"} err="failed to get container status \"a88775356f3a23147cb75d68c96cedbe7f10cb56fdf53fe421b59860064861ef\": rpc error: code = NotFound desc = could not find container \"a88775356f3a23147cb75d68c96cedbe7f10cb56fdf53fe421b59860064861ef\": container with ID starting with a88775356f3a23147cb75d68c96cedbe7f10cb56fdf53fe421b59860064861ef not found: ID does not exist"
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.253498 4949 scope.go:117] "RemoveContainer" containerID="f24a0f2bc59eb36f04d730f037b9e58b2ef9def5d1ebe5aaaf6f68cc2d0194d6"
Feb 16 11:33:46 crc kubenswrapper[4949]: E0216 11:33:46.253690 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f24a0f2bc59eb36f04d730f037b9e58b2ef9def5d1ebe5aaaf6f68cc2d0194d6\": container with ID starting with f24a0f2bc59eb36f04d730f037b9e58b2ef9def5d1ebe5aaaf6f68cc2d0194d6 not found: ID does not exist" containerID="f24a0f2bc59eb36f04d730f037b9e58b2ef9def5d1ebe5aaaf6f68cc2d0194d6"
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.253707 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f24a0f2bc59eb36f04d730f037b9e58b2ef9def5d1ebe5aaaf6f68cc2d0194d6"} err="failed to get container status \"f24a0f2bc59eb36f04d730f037b9e58b2ef9def5d1ebe5aaaf6f68cc2d0194d6\": rpc error: code = NotFound desc = could not find container \"f24a0f2bc59eb36f04d730f037b9e58b2ef9def5d1ebe5aaaf6f68cc2d0194d6\": container with ID starting with f24a0f2bc59eb36f04d730f037b9e58b2ef9def5d1ebe5aaaf6f68cc2d0194d6 not found: ID does not exist"
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.269555 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2a12bce-abf1-4ac0-9d15-6c9ee69eba44-config-data\") on node \"crc\" DevicePath \"\""
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.368203 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="d71e65be-6778-41c8-8a34-eda639803764" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.1.2:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.368442 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="d71e65be-6778-41c8-8a34-eda639803764" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.1.2:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.384131 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.423662 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.443502 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Feb 16 11:33:46 crc kubenswrapper[4949]: E0216 11:33:46.443967 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2a12bce-abf1-4ac0-9d15-6c9ee69eba44" containerName="sg-core"
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.443991 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2a12bce-abf1-4ac0-9d15-6c9ee69eba44" containerName="sg-core"
Feb 16 11:33:46 crc kubenswrapper[4949]: E0216 11:33:46.444023 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2a12bce-abf1-4ac0-9d15-6c9ee69eba44" containerName="ceilometer-notification-agent"
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.444030 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2a12bce-abf1-4ac0-9d15-6c9ee69eba44" containerName="ceilometer-notification-agent"
Feb 16 11:33:46 crc kubenswrapper[4949]: E0216 11:33:46.444048 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2a12bce-abf1-4ac0-9d15-6c9ee69eba44" containerName="ceilometer-central-agent"
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.444054 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2a12bce-abf1-4ac0-9d15-6c9ee69eba44" containerName="ceilometer-central-agent"
Feb 16 11:33:46 crc kubenswrapper[4949]: E0216 11:33:46.444095 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2a12bce-abf1-4ac0-9d15-6c9ee69eba44" containerName="proxy-httpd"
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.444102 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2a12bce-abf1-4ac0-9d15-6c9ee69eba44" containerName="proxy-httpd"
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.444342 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="e2a12bce-abf1-4ac0-9d15-6c9ee69eba44" containerName="ceilometer-notification-agent"
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.444364 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="e2a12bce-abf1-4ac0-9d15-6c9ee69eba44" containerName="proxy-httpd"
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.444385 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="e2a12bce-abf1-4ac0-9d15-6c9ee69eba44" containerName="ceilometer-central-agent"
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.444393 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="e2a12bce-abf1-4ac0-9d15-6c9ee69eba44" containerName="sg-core"
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.446634 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.450103 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.450937 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.456706 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.581965 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/28eebc18-c4e8-474f-bb20-aa5919f71f9e-config-data\") pod \"ceilometer-0\" (UID: \"28eebc18-c4e8-474f-bb20-aa5919f71f9e\") " pod="openstack/ceilometer-0"
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.582094 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/28eebc18-c4e8-474f-bb20-aa5919f71f9e-log-httpd\") pod \"ceilometer-0\" (UID: \"28eebc18-c4e8-474f-bb20-aa5919f71f9e\") " pod="openstack/ceilometer-0"
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.582179 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/28eebc18-c4e8-474f-bb20-aa5919f71f9e-run-httpd\") pod \"ceilometer-0\" (UID: \"28eebc18-c4e8-474f-bb20-aa5919f71f9e\") " pod="openstack/ceilometer-0"
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.582227 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28eebc18-c4e8-474f-bb20-aa5919f71f9e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"28eebc18-c4e8-474f-bb20-aa5919f71f9e\") " pod="openstack/ceilometer-0"
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.582350 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/28eebc18-c4e8-474f-bb20-aa5919f71f9e-scripts\") pod \"ceilometer-0\" (UID: \"28eebc18-c4e8-474f-bb20-aa5919f71f9e\") " pod="openstack/ceilometer-0"
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.582520 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zpjft\" (UniqueName: \"kubernetes.io/projected/28eebc18-c4e8-474f-bb20-aa5919f71f9e-kube-api-access-zpjft\") pod \"ceilometer-0\" (UID: \"28eebc18-c4e8-474f-bb20-aa5919f71f9e\") " pod="openstack/ceilometer-0"
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.582794 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/28eebc18-c4e8-474f-bb20-aa5919f71f9e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"28eebc18-c4e8-474f-bb20-aa5919f71f9e\") " pod="openstack/ceilometer-0"
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.685774 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/28eebc18-c4e8-474f-bb20-aa5919f71f9e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"28eebc18-c4e8-474f-bb20-aa5919f71f9e\") " pod="openstack/ceilometer-0"
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.685904 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/28eebc18-c4e8-474f-bb20-aa5919f71f9e-config-data\") pod \"ceilometer-0\" (UID: \"28eebc18-c4e8-474f-bb20-aa5919f71f9e\") " pod="openstack/ceilometer-0"
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.685960 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/28eebc18-c4e8-474f-bb20-aa5919f71f9e-log-httpd\") pod \"ceilometer-0\" (UID: \"28eebc18-c4e8-474f-bb20-aa5919f71f9e\") " pod="openstack/ceilometer-0"
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.686042 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/28eebc18-c4e8-474f-bb20-aa5919f71f9e-run-httpd\") pod \"ceilometer-0\" (UID: \"28eebc18-c4e8-474f-bb20-aa5919f71f9e\") " pod="openstack/ceilometer-0"
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.686093 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28eebc18-c4e8-474f-bb20-aa5919f71f9e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"28eebc18-c4e8-474f-bb20-aa5919f71f9e\") " pod="openstack/ceilometer-0"
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.686235 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/28eebc18-c4e8-474f-bb20-aa5919f71f9e-scripts\") pod \"ceilometer-0\" (UID: \"28eebc18-c4e8-474f-bb20-aa5919f71f9e\") " pod="openstack/ceilometer-0"
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.686369 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zpjft\" (UniqueName: \"kubernetes.io/projected/28eebc18-c4e8-474f-bb20-aa5919f71f9e-kube-api-access-zpjft\") pod \"ceilometer-0\" (UID: \"28eebc18-c4e8-474f-bb20-aa5919f71f9e\") " pod="openstack/ceilometer-0"
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.688690 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/28eebc18-c4e8-474f-bb20-aa5919f71f9e-log-httpd\") pod \"ceilometer-0\" (UID: \"28eebc18-c4e8-474f-bb20-aa5919f71f9e\") " pod="openstack/ceilometer-0"
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.688695 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/28eebc18-c4e8-474f-bb20-aa5919f71f9e-run-httpd\") pod \"ceilometer-0\" (UID: \"28eebc18-c4e8-474f-bb20-aa5919f71f9e\") " pod="openstack/ceilometer-0"
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.692903 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/28eebc18-c4e8-474f-bb20-aa5919f71f9e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"28eebc18-c4e8-474f-bb20-aa5919f71f9e\") " pod="openstack/ceilometer-0"
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.696607 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/28eebc18-c4e8-474f-bb20-aa5919f71f9e-config-data\") pod \"ceilometer-0\" (UID: \"28eebc18-c4e8-474f-bb20-aa5919f71f9e\") " pod="openstack/ceilometer-0"
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.697596 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/28eebc18-c4e8-474f-bb20-aa5919f71f9e-scripts\") pod \"ceilometer-0\" (UID: \"28eebc18-c4e8-474f-bb20-aa5919f71f9e\") " pod="openstack/ceilometer-0"
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.702436 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28eebc18-c4e8-474f-bb20-aa5919f71f9e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"28eebc18-c4e8-474f-bb20-aa5919f71f9e\") " pod="openstack/ceilometer-0"
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.718517 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zpjft\" (UniqueName: \"kubernetes.io/projected/28eebc18-c4e8-474f-bb20-aa5919f71f9e-kube-api-access-zpjft\") pod \"ceilometer-0\" (UID: \"28eebc18-c4e8-474f-bb20-aa5919f71f9e\") " pod="openstack/ceilometer-0"
Feb 16 11:33:46 crc kubenswrapper[4949]: I0216 11:33:46.768458 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Feb 16 11:33:47 crc kubenswrapper[4949]: I0216 11:33:47.253693 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e2a12bce-abf1-4ac0-9d15-6c9ee69eba44" path="/var/lib/kubelet/pods/e2a12bce-abf1-4ac0-9d15-6c9ee69eba44/volumes"
Feb 16 11:33:47 crc kubenswrapper[4949]: W0216 11:33:47.297537 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod28eebc18_c4e8_474f_bb20_aa5919f71f9e.slice/crio-ae5975de579909110c90515b0da8a4ddd1622ce468e0b07a427a52aed1c6e2b6 WatchSource:0}: Error finding container ae5975de579909110c90515b0da8a4ddd1622ce468e0b07a427a52aed1c6e2b6: Status 404 returned error can't find the container with id ae5975de579909110c90515b0da8a4ddd1622ce468e0b07a427a52aed1c6e2b6
Feb 16 11:33:47 crc kubenswrapper[4949]: I0216 11:33:47.298622 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Feb 16 11:33:48 crc kubenswrapper[4949]: I0216 11:33:48.054199 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"28eebc18-c4e8-474f-bb20-aa5919f71f9e","Type":"ContainerStarted","Data":"ae5975de579909110c90515b0da8a4ddd1622ce468e0b07a427a52aed1c6e2b6"}
Feb 16 11:33:48 crc kubenswrapper[4949]: E0216 11:33:48.296061 4949 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod91104758_7e02_4761_bc39_fbca029cda0f.slice\": RecentStats: unable to find data in memory cache]"
Feb 16 11:33:48 crc kubenswrapper[4949]: E0216 11:33:48.298248 4949 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod91104758_7e02_4761_bc39_fbca029cda0f.slice\": RecentStats: unable to find data in memory cache]"
Feb 16 11:33:49 crc kubenswrapper[4949]: I0216 11:33:49.068158 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"28eebc18-c4e8-474f-bb20-aa5919f71f9e","Type":"ContainerStarted","Data":"266f9bd9cac1f8407dcb711849bc5e0e5c38e1701f427af2b28a30d6027a3a7c"}
Feb 16 11:33:49 crc kubenswrapper[4949]: I0216 11:33:49.415796 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0"
Feb 16 11:33:50 crc kubenswrapper[4949]: I0216 11:33:50.082767 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"28eebc18-c4e8-474f-bb20-aa5919f71f9e","Type":"ContainerStarted","Data":"d8c9637e1c7760cef2791464d9db3682e0c8c3eaac9b5efa94b61bf754ffd8b6"}
Feb 16 11:33:51 crc kubenswrapper[4949]: E0216 11:33:51.094470 4949 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod91104758_7e02_4761_bc39_fbca029cda0f.slice\": RecentStats: unable to find data in memory cache]"
Feb 16 11:33:51 crc kubenswrapper[4949]: I0216 11:33:51.109479 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"28eebc18-c4e8-474f-bb20-aa5919f71f9e","Type":"ContainerStarted","Data":"6c8603501db783d4cf8635b42ee9d1963582d2e3c4d985fbbc4458755f9f4329"}
Feb 16 11:33:51 crc kubenswrapper[4949]: I0216 11:33:51.257208 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Feb 16 11:33:51 crc kubenswrapper[4949]: I0216 11:33:51.257865 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Feb 16 11:33:51 crc kubenswrapper[4949]: I0216 11:33:51.261787 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Feb 16 11:33:51 crc kubenswrapper[4949]: I0216 11:33:51.265433 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Feb 16 11:33:52 crc kubenswrapper[4949]: I0216 11:33:52.504104 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Feb 16 11:33:52 crc kubenswrapper[4949]: I0216 11:33:52.525780 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Feb 16 11:33:53 crc kubenswrapper[4949]: I0216 11:33:53.524525 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"28eebc18-c4e8-474f-bb20-aa5919f71f9e","Type":"ContainerStarted","Data":"121245bca1d05abad828d44d82f8dac7b0ec7c05c15b34a4aa340497b4266592"}
Feb 16 11:33:53 crc kubenswrapper[4949]: I0216 11:33:53.524937 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Feb 16 11:33:53 crc kubenswrapper[4949]: I0216 11:33:53.564315 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.779397342 podStartE2EDuration="7.564293219s" podCreationTimestamp="2026-02-16 11:33:46 +0000 UTC" firstStartedPulling="2026-02-16 11:33:47.300715436 +0000 UTC m=+1616.930049601" lastFinishedPulling="2026-02-16 11:33:53.085611293 +0000 UTC m=+1622.714945478" observedRunningTime="2026-02-16 11:33:53.549602018 +0000 UTC m=+1623.178936333" watchObservedRunningTime="2026-02-16 11:33:53.564293219 +0000 UTC m=+1623.193627384"
Feb 16 11:33:54 crc kubenswrapper[4949]: I0216 11:33:54.416765 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0"
Feb 16 11:33:54 crc kubenswrapper[4949]: I0216 11:33:54.488239 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0"
Feb 16 11:33:54 crc kubenswrapper[4949]: I0216 11:33:54.556159 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0"
Feb 16 11:33:54 crc kubenswrapper[4949]: I0216 11:33:54.743972 4949 kubelet.go:2421] "SyncLoop ADD"
source="api" pods=["openstack/nova-cell1-cell-mapping-j5mtk"] Feb 16 11:33:54 crc kubenswrapper[4949]: I0216 11:33:54.756404 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-j5mtk" Feb 16 11:33:54 crc kubenswrapper[4949]: I0216 11:33:54.762911 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Feb 16 11:33:54 crc kubenswrapper[4949]: I0216 11:33:54.763740 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Feb 16 11:33:54 crc kubenswrapper[4949]: I0216 11:33:54.785937 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-j5mtk"] Feb 16 11:33:54 crc kubenswrapper[4949]: I0216 11:33:54.945658 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6a0bf28e-fc72-402a-b5e9-077eb5218110-config-data\") pod \"nova-cell1-cell-mapping-j5mtk\" (UID: \"6a0bf28e-fc72-402a-b5e9-077eb5218110\") " pod="openstack/nova-cell1-cell-mapping-j5mtk" Feb 16 11:33:54 crc kubenswrapper[4949]: I0216 11:33:54.946327 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a0bf28e-fc72-402a-b5e9-077eb5218110-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-j5mtk\" (UID: \"6a0bf28e-fc72-402a-b5e9-077eb5218110\") " pod="openstack/nova-cell1-cell-mapping-j5mtk" Feb 16 11:33:54 crc kubenswrapper[4949]: I0216 11:33:54.946545 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6a0bf28e-fc72-402a-b5e9-077eb5218110-scripts\") pod \"nova-cell1-cell-mapping-j5mtk\" (UID: \"6a0bf28e-fc72-402a-b5e9-077eb5218110\") " pod="openstack/nova-cell1-cell-mapping-j5mtk" Feb 16 11:33:54 crc kubenswrapper[4949]: I0216 11:33:54.946778 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hzbwl\" (UniqueName: \"kubernetes.io/projected/6a0bf28e-fc72-402a-b5e9-077eb5218110-kube-api-access-hzbwl\") pod \"nova-cell1-cell-mapping-j5mtk\" (UID: \"6a0bf28e-fc72-402a-b5e9-077eb5218110\") " pod="openstack/nova-cell1-cell-mapping-j5mtk" Feb 16 11:33:55 crc kubenswrapper[4949]: I0216 11:33:55.049404 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hzbwl\" (UniqueName: \"kubernetes.io/projected/6a0bf28e-fc72-402a-b5e9-077eb5218110-kube-api-access-hzbwl\") pod \"nova-cell1-cell-mapping-j5mtk\" (UID: \"6a0bf28e-fc72-402a-b5e9-077eb5218110\") " pod="openstack/nova-cell1-cell-mapping-j5mtk" Feb 16 11:33:55 crc kubenswrapper[4949]: I0216 11:33:55.049535 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6a0bf28e-fc72-402a-b5e9-077eb5218110-config-data\") pod \"nova-cell1-cell-mapping-j5mtk\" (UID: \"6a0bf28e-fc72-402a-b5e9-077eb5218110\") " pod="openstack/nova-cell1-cell-mapping-j5mtk" Feb 16 11:33:55 crc kubenswrapper[4949]: I0216 11:33:55.049675 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a0bf28e-fc72-402a-b5e9-077eb5218110-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-j5mtk\" (UID: \"6a0bf28e-fc72-402a-b5e9-077eb5218110\") " 
pod="openstack/nova-cell1-cell-mapping-j5mtk" Feb 16 11:33:55 crc kubenswrapper[4949]: I0216 11:33:55.049760 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6a0bf28e-fc72-402a-b5e9-077eb5218110-scripts\") pod \"nova-cell1-cell-mapping-j5mtk\" (UID: \"6a0bf28e-fc72-402a-b5e9-077eb5218110\") " pod="openstack/nova-cell1-cell-mapping-j5mtk" Feb 16 11:33:55 crc kubenswrapper[4949]: I0216 11:33:55.055677 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6a0bf28e-fc72-402a-b5e9-077eb5218110-scripts\") pod \"nova-cell1-cell-mapping-j5mtk\" (UID: \"6a0bf28e-fc72-402a-b5e9-077eb5218110\") " pod="openstack/nova-cell1-cell-mapping-j5mtk" Feb 16 11:33:55 crc kubenswrapper[4949]: I0216 11:33:55.055742 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a0bf28e-fc72-402a-b5e9-077eb5218110-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-j5mtk\" (UID: \"6a0bf28e-fc72-402a-b5e9-077eb5218110\") " pod="openstack/nova-cell1-cell-mapping-j5mtk" Feb 16 11:33:55 crc kubenswrapper[4949]: I0216 11:33:55.056031 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6a0bf28e-fc72-402a-b5e9-077eb5218110-config-data\") pod \"nova-cell1-cell-mapping-j5mtk\" (UID: \"6a0bf28e-fc72-402a-b5e9-077eb5218110\") " pod="openstack/nova-cell1-cell-mapping-j5mtk" Feb 16 11:33:55 crc kubenswrapper[4949]: I0216 11:33:55.074332 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hzbwl\" (UniqueName: \"kubernetes.io/projected/6a0bf28e-fc72-402a-b5e9-077eb5218110-kube-api-access-hzbwl\") pod \"nova-cell1-cell-mapping-j5mtk\" (UID: \"6a0bf28e-fc72-402a-b5e9-077eb5218110\") " pod="openstack/nova-cell1-cell-mapping-j5mtk" Feb 16 11:33:55 crc kubenswrapper[4949]: I0216 11:33:55.087917 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-j5mtk" Feb 16 11:33:55 crc kubenswrapper[4949]: I0216 11:33:55.353648 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Feb 16 11:33:55 crc kubenswrapper[4949]: I0216 11:33:55.359394 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Feb 16 11:33:55 crc kubenswrapper[4949]: I0216 11:33:55.364799 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Feb 16 11:33:55 crc kubenswrapper[4949]: I0216 11:33:55.549540 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Feb 16 11:33:55 crc kubenswrapper[4949]: I0216 11:33:55.687982 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-j5mtk"] Feb 16 11:33:56 crc kubenswrapper[4949]: I0216 11:33:56.563843 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-j5mtk" event={"ID":"6a0bf28e-fc72-402a-b5e9-077eb5218110","Type":"ContainerStarted","Data":"c47e71feba59c5aa1f640a9adb819e43ba30466d91d2c60801613210a1e77339"} Feb 16 11:33:56 crc kubenswrapper[4949]: I0216 11:33:56.564143 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-j5mtk" event={"ID":"6a0bf28e-fc72-402a-b5e9-077eb5218110","Type":"ContainerStarted","Data":"91a3c155ea5faad5c248b60a73fd32c1abc7bd326497d3f29989c2b708a1cfdb"} Feb 16 11:33:56 crc kubenswrapper[4949]: I0216 11:33:56.596889 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-j5mtk" podStartSLOduration=2.596864611 podStartE2EDuration="2.596864611s" podCreationTimestamp="2026-02-16 11:33:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:33:56.582803748 +0000 UTC m=+1626.212137913" watchObservedRunningTime="2026-02-16 11:33:56.596864611 +0000 UTC m=+1626.226198786" Feb 16 11:34:01 crc kubenswrapper[4949]: I0216 11:34:01.619858 4949 generic.go:334] "Generic (PLEG): container finished" podID="6a0bf28e-fc72-402a-b5e9-077eb5218110" containerID="c47e71feba59c5aa1f640a9adb819e43ba30466d91d2c60801613210a1e77339" exitCode=0 Feb 16 11:34:01 crc kubenswrapper[4949]: I0216 11:34:01.619927 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-j5mtk" event={"ID":"6a0bf28e-fc72-402a-b5e9-077eb5218110","Type":"ContainerDied","Data":"c47e71feba59c5aa1f640a9adb819e43ba30466d91d2c60801613210a1e77339"} Feb 16 11:34:03 crc kubenswrapper[4949]: I0216 11:34:03.109293 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-j5mtk" Feb 16 11:34:03 crc kubenswrapper[4949]: I0216 11:34:03.284282 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hzbwl\" (UniqueName: \"kubernetes.io/projected/6a0bf28e-fc72-402a-b5e9-077eb5218110-kube-api-access-hzbwl\") pod \"6a0bf28e-fc72-402a-b5e9-077eb5218110\" (UID: \"6a0bf28e-fc72-402a-b5e9-077eb5218110\") " Feb 16 11:34:03 crc kubenswrapper[4949]: I0216 11:34:03.284425 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a0bf28e-fc72-402a-b5e9-077eb5218110-combined-ca-bundle\") pod \"6a0bf28e-fc72-402a-b5e9-077eb5218110\" (UID: \"6a0bf28e-fc72-402a-b5e9-077eb5218110\") " Feb 16 11:34:03 crc kubenswrapper[4949]: I0216 11:34:03.284663 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6a0bf28e-fc72-402a-b5e9-077eb5218110-scripts\") pod \"6a0bf28e-fc72-402a-b5e9-077eb5218110\" (UID: \"6a0bf28e-fc72-402a-b5e9-077eb5218110\") " Feb 16 11:34:03 crc kubenswrapper[4949]: I0216 11:34:03.284699 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6a0bf28e-fc72-402a-b5e9-077eb5218110-config-data\") pod \"6a0bf28e-fc72-402a-b5e9-077eb5218110\" (UID: \"6a0bf28e-fc72-402a-b5e9-077eb5218110\") " Feb 16 11:34:03 crc kubenswrapper[4949]: I0216 11:34:03.291876 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6a0bf28e-fc72-402a-b5e9-077eb5218110-kube-api-access-hzbwl" (OuterVolumeSpecName: "kube-api-access-hzbwl") pod "6a0bf28e-fc72-402a-b5e9-077eb5218110" (UID: "6a0bf28e-fc72-402a-b5e9-077eb5218110"). InnerVolumeSpecName "kube-api-access-hzbwl". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:34:03 crc kubenswrapper[4949]: I0216 11:34:03.295007 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a0bf28e-fc72-402a-b5e9-077eb5218110-scripts" (OuterVolumeSpecName: "scripts") pod "6a0bf28e-fc72-402a-b5e9-077eb5218110" (UID: "6a0bf28e-fc72-402a-b5e9-077eb5218110"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:34:03 crc kubenswrapper[4949]: I0216 11:34:03.322471 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a0bf28e-fc72-402a-b5e9-077eb5218110-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6a0bf28e-fc72-402a-b5e9-077eb5218110" (UID: "6a0bf28e-fc72-402a-b5e9-077eb5218110"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:34:03 crc kubenswrapper[4949]: I0216 11:34:03.325547 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a0bf28e-fc72-402a-b5e9-077eb5218110-config-data" (OuterVolumeSpecName: "config-data") pod "6a0bf28e-fc72-402a-b5e9-077eb5218110" (UID: "6a0bf28e-fc72-402a-b5e9-077eb5218110"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:34:03 crc kubenswrapper[4949]: I0216 11:34:03.392934 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hzbwl\" (UniqueName: \"kubernetes.io/projected/6a0bf28e-fc72-402a-b5e9-077eb5218110-kube-api-access-hzbwl\") on node \"crc\" DevicePath \"\"" Feb 16 11:34:03 crc kubenswrapper[4949]: I0216 11:34:03.392963 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a0bf28e-fc72-402a-b5e9-077eb5218110-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:34:03 crc kubenswrapper[4949]: I0216 11:34:03.392974 4949 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6a0bf28e-fc72-402a-b5e9-077eb5218110-scripts\") on node \"crc\" DevicePath \"\"" Feb 16 11:34:03 crc kubenswrapper[4949]: I0216 11:34:03.392982 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6a0bf28e-fc72-402a-b5e9-077eb5218110-config-data\") on node \"crc\" DevicePath \"\"" Feb 16 11:34:03 crc kubenswrapper[4949]: I0216 11:34:03.642747 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-j5mtk" event={"ID":"6a0bf28e-fc72-402a-b5e9-077eb5218110","Type":"ContainerDied","Data":"91a3c155ea5faad5c248b60a73fd32c1abc7bd326497d3f29989c2b708a1cfdb"} Feb 16 11:34:03 crc kubenswrapper[4949]: I0216 11:34:03.642798 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="91a3c155ea5faad5c248b60a73fd32c1abc7bd326497d3f29989c2b708a1cfdb" Feb 16 11:34:03 crc kubenswrapper[4949]: I0216 11:34:03.642818 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-j5mtk" Feb 16 11:34:03 crc kubenswrapper[4949]: I0216 11:34:03.835672 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Feb 16 11:34:03 crc kubenswrapper[4949]: I0216 11:34:03.835986 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="9b6adc35-266a-478c-ae17-63a88705e8a9" containerName="nova-api-log" containerID="cri-o://6a08d00f90e435bb819d281e25e763b6a90ec608dc5cf88bf667a76972aa6f72" gracePeriod=30 Feb 16 11:34:03 crc kubenswrapper[4949]: I0216 11:34:03.836586 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="9b6adc35-266a-478c-ae17-63a88705e8a9" containerName="nova-api-api" containerID="cri-o://57ca1a45d63fc68f8706fc5c09637deb745fe88c4cac25e86952e39227fecd32" gracePeriod=30 Feb 16 11:34:03 crc kubenswrapper[4949]: I0216 11:34:03.850799 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Feb 16 11:34:03 crc kubenswrapper[4949]: I0216 11:34:03.851313 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1" containerName="nova-scheduler-scheduler" containerID="cri-o://dc8b443557e7ef591dc5c9e3d9dd638603d3c9d9ac8122b1c87600a739d7f0f5" gracePeriod=30 Feb 16 11:34:03 crc kubenswrapper[4949]: E0216 11:34:03.904711 4949 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="dc8b443557e7ef591dc5c9e3d9dd638603d3c9d9ac8122b1c87600a739d7f0f5" 
cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Feb 16 11:34:03 crc kubenswrapper[4949]: E0216 11:34:03.907331 4949 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="dc8b443557e7ef591dc5c9e3d9dd638603d3c9d9ac8122b1c87600a739d7f0f5" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Feb 16 11:34:03 crc kubenswrapper[4949]: E0216 11:34:03.910256 4949 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="dc8b443557e7ef591dc5c9e3d9dd638603d3c9d9ac8122b1c87600a739d7f0f5" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Feb 16 11:34:03 crc kubenswrapper[4949]: E0216 11:34:03.910305 4949 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1" containerName="nova-scheduler-scheduler" Feb 16 11:34:03 crc kubenswrapper[4949]: I0216 11:34:03.920311 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Feb 16 11:34:03 crc kubenswrapper[4949]: I0216 11:34:03.920554 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="d71e65be-6778-41c8-8a34-eda639803764" containerName="nova-metadata-log" containerID="cri-o://840fabca59d475ba7fab292b1bb8d5db13ac403f879d3dcf2ffa75ad8e70f2e4" gracePeriod=30 Feb 16 11:34:03 crc kubenswrapper[4949]: I0216 11:34:03.920708 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="d71e65be-6778-41c8-8a34-eda639803764" containerName="nova-metadata-metadata" containerID="cri-o://957d5f117b32011d3f96e5331daff6d25a8f2f625d676f5a798386ac07d29163" gracePeriod=30 Feb 16 11:34:04 crc kubenswrapper[4949]: I0216 11:34:04.550847 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 16 11:34:04 crc kubenswrapper[4949]: I0216 11:34:04.550949 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 16 11:34:04 crc kubenswrapper[4949]: I0216 11:34:04.551004 4949 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-26lss" Feb 16 11:34:04 crc kubenswrapper[4949]: I0216 11:34:04.552202 4949 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"fcc25fdabb7245fb2b21fe157daaa0479c92c3c1e231adb091c93ee7fe6b8437"} pod="openshift-machine-config-operator/machine-config-daemon-26lss" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 16 11:34:04 crc kubenswrapper[4949]: I0216 11:34:04.552295 4949 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" containerID="cri-o://fcc25fdabb7245fb2b21fe157daaa0479c92c3c1e231adb091c93ee7fe6b8437" gracePeriod=600 Feb 16 11:34:04 crc kubenswrapper[4949]: I0216 11:34:04.660972 4949 generic.go:334] "Generic (PLEG): container finished" podID="9b6adc35-266a-478c-ae17-63a88705e8a9" containerID="6a08d00f90e435bb819d281e25e763b6a90ec608dc5cf88bf667a76972aa6f72" exitCode=143 Feb 16 11:34:04 crc kubenswrapper[4949]: I0216 11:34:04.661208 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"9b6adc35-266a-478c-ae17-63a88705e8a9","Type":"ContainerDied","Data":"6a08d00f90e435bb819d281e25e763b6a90ec608dc5cf88bf667a76972aa6f72"} Feb 16 11:34:04 crc kubenswrapper[4949]: I0216 11:34:04.664938 4949 generic.go:334] "Generic (PLEG): container finished" podID="d71e65be-6778-41c8-8a34-eda639803764" containerID="840fabca59d475ba7fab292b1bb8d5db13ac403f879d3dcf2ffa75ad8e70f2e4" exitCode=143 Feb 16 11:34:04 crc kubenswrapper[4949]: I0216 11:34:04.665048 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d71e65be-6778-41c8-8a34-eda639803764","Type":"ContainerDied","Data":"840fabca59d475ba7fab292b1bb8d5db13ac403f879d3dcf2ffa75ad8e70f2e4"} Feb 16 11:34:04 crc kubenswrapper[4949]: E0216 11:34:04.699978 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:34:05 crc kubenswrapper[4949]: I0216 11:34:05.679936 4949 generic.go:334] "Generic (PLEG): container finished" podID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerID="fcc25fdabb7245fb2b21fe157daaa0479c92c3c1e231adb091c93ee7fe6b8437" exitCode=0 Feb 16 11:34:05 crc kubenswrapper[4949]: I0216 11:34:05.679985 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" event={"ID":"39ca5ab7-457c-4404-a3eb-f6acce74843b","Type":"ContainerDied","Data":"fcc25fdabb7245fb2b21fe157daaa0479c92c3c1e231adb091c93ee7fe6b8437"} Feb 16 11:34:05 crc kubenswrapper[4949]: I0216 11:34:05.680025 4949 scope.go:117] "RemoveContainer" containerID="c897db476ea0eaab84f58dfc5ce1290f1b6a8a12d03297a1f99537a46ae19905" Feb 16 11:34:05 crc kubenswrapper[4949]: I0216 11:34:05.681138 4949 scope.go:117] "RemoveContainer" containerID="fcc25fdabb7245fb2b21fe157daaa0479c92c3c1e231adb091c93ee7fe6b8437" Feb 16 11:34:05 crc kubenswrapper[4949]: E0216 11:34:05.681622 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.070683 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" 
podUID="d71e65be-6778-41c8-8a34-eda639803764" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.1.2:8775/\": read tcp 10.217.0.2:57504->10.217.1.2:8775: read: connection reset by peer" Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.070756 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="d71e65be-6778-41c8-8a34-eda639803764" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.1.2:8775/\": read tcp 10.217.0.2:57490->10.217.1.2:8775: read: connection reset by peer" Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.673290 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.679913 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.730262 4949 generic.go:334] "Generic (PLEG): container finished" podID="9b6adc35-266a-478c-ae17-63a88705e8a9" containerID="57ca1a45d63fc68f8706fc5c09637deb745fe88c4cac25e86952e39227fecd32" exitCode=0 Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.730340 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"9b6adc35-266a-478c-ae17-63a88705e8a9","Type":"ContainerDied","Data":"57ca1a45d63fc68f8706fc5c09637deb745fe88c4cac25e86952e39227fecd32"} Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.730373 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"9b6adc35-266a-478c-ae17-63a88705e8a9","Type":"ContainerDied","Data":"f0672e269bc6e22d366d35710c5099e97ee2670d2d9c23e5de294e3aa57570cd"} Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.730393 4949 scope.go:117] "RemoveContainer" containerID="57ca1a45d63fc68f8706fc5c09637deb745fe88c4cac25e86952e39227fecd32" Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.730526 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.758521 4949 generic.go:334] "Generic (PLEG): container finished" podID="d71e65be-6778-41c8-8a34-eda639803764" containerID="957d5f117b32011d3f96e5331daff6d25a8f2f625d676f5a798386ac07d29163" exitCode=0 Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.758586 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d71e65be-6778-41c8-8a34-eda639803764","Type":"ContainerDied","Data":"957d5f117b32011d3f96e5331daff6d25a8f2f625d676f5a798386ac07d29163"} Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.758630 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d71e65be-6778-41c8-8a34-eda639803764","Type":"ContainerDied","Data":"1cce764f5b49df02d33a145a6e1428349a55332bbdfe9042ed5ec5198be343bb"} Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.758724 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.794211 4949 scope.go:117] "RemoveContainer" containerID="6a08d00f90e435bb819d281e25e763b6a90ec608dc5cf88bf667a76972aa6f72" Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.827370 4949 scope.go:117] "RemoveContainer" containerID="57ca1a45d63fc68f8706fc5c09637deb745fe88c4cac25e86952e39227fecd32" Feb 16 11:34:07 crc kubenswrapper[4949]: E0216 11:34:07.827962 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"57ca1a45d63fc68f8706fc5c09637deb745fe88c4cac25e86952e39227fecd32\": container with ID starting with 57ca1a45d63fc68f8706fc5c09637deb745fe88c4cac25e86952e39227fecd32 not found: ID does not exist" containerID="57ca1a45d63fc68f8706fc5c09637deb745fe88c4cac25e86952e39227fecd32" Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.828010 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"57ca1a45d63fc68f8706fc5c09637deb745fe88c4cac25e86952e39227fecd32"} err="failed to get container status \"57ca1a45d63fc68f8706fc5c09637deb745fe88c4cac25e86952e39227fecd32\": rpc error: code = NotFound desc = could not find container \"57ca1a45d63fc68f8706fc5c09637deb745fe88c4cac25e86952e39227fecd32\": container with ID starting with 57ca1a45d63fc68f8706fc5c09637deb745fe88c4cac25e86952e39227fecd32 not found: ID does not exist" Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.828041 4949 scope.go:117] "RemoveContainer" containerID="6a08d00f90e435bb819d281e25e763b6a90ec608dc5cf88bf667a76972aa6f72" Feb 16 11:34:07 crc kubenswrapper[4949]: E0216 11:34:07.828512 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6a08d00f90e435bb819d281e25e763b6a90ec608dc5cf88bf667a76972aa6f72\": container with ID starting with 6a08d00f90e435bb819d281e25e763b6a90ec608dc5cf88bf667a76972aa6f72 not found: ID does not exist" containerID="6a08d00f90e435bb819d281e25e763b6a90ec608dc5cf88bf667a76972aa6f72" Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.828543 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6a08d00f90e435bb819d281e25e763b6a90ec608dc5cf88bf667a76972aa6f72"} err="failed to get container status \"6a08d00f90e435bb819d281e25e763b6a90ec608dc5cf88bf667a76972aa6f72\": rpc error: code = NotFound desc = could not find container \"6a08d00f90e435bb819d281e25e763b6a90ec608dc5cf88bf667a76972aa6f72\": container with ID starting with 6a08d00f90e435bb819d281e25e763b6a90ec608dc5cf88bf667a76972aa6f72 not found: ID does not exist" Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.828563 4949 scope.go:117] "RemoveContainer" containerID="957d5f117b32011d3f96e5331daff6d25a8f2f625d676f5a798386ac07d29163" Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.832978 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d71e65be-6778-41c8-8a34-eda639803764-config-data\") pod \"d71e65be-6778-41c8-8a34-eda639803764\" (UID: \"d71e65be-6778-41c8-8a34-eda639803764\") " Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.833280 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b6adc35-266a-478c-ae17-63a88705e8a9-combined-ca-bundle\") pod \"9b6adc35-266a-478c-ae17-63a88705e8a9\" (UID: 
\"9b6adc35-266a-478c-ae17-63a88705e8a9\") " Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.833377 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b6adc35-266a-478c-ae17-63a88705e8a9-config-data\") pod \"9b6adc35-266a-478c-ae17-63a88705e8a9\" (UID: \"9b6adc35-266a-478c-ae17-63a88705e8a9\") " Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.833460 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kznpg\" (UniqueName: \"kubernetes.io/projected/9b6adc35-266a-478c-ae17-63a88705e8a9-kube-api-access-kznpg\") pod \"9b6adc35-266a-478c-ae17-63a88705e8a9\" (UID: \"9b6adc35-266a-478c-ae17-63a88705e8a9\") " Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.833540 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/d71e65be-6778-41c8-8a34-eda639803764-nova-metadata-tls-certs\") pod \"d71e65be-6778-41c8-8a34-eda639803764\" (UID: \"d71e65be-6778-41c8-8a34-eda639803764\") " Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.833757 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9b6adc35-266a-478c-ae17-63a88705e8a9-internal-tls-certs\") pod \"9b6adc35-266a-478c-ae17-63a88705e8a9\" (UID: \"9b6adc35-266a-478c-ae17-63a88705e8a9\") " Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.833858 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d71e65be-6778-41c8-8a34-eda639803764-combined-ca-bundle\") pod \"d71e65be-6778-41c8-8a34-eda639803764\" (UID: \"d71e65be-6778-41c8-8a34-eda639803764\") " Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.833911 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rggcj\" (UniqueName: \"kubernetes.io/projected/d71e65be-6778-41c8-8a34-eda639803764-kube-api-access-rggcj\") pod \"d71e65be-6778-41c8-8a34-eda639803764\" (UID: \"d71e65be-6778-41c8-8a34-eda639803764\") " Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.833961 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d71e65be-6778-41c8-8a34-eda639803764-logs\") pod \"d71e65be-6778-41c8-8a34-eda639803764\" (UID: \"d71e65be-6778-41c8-8a34-eda639803764\") " Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.833998 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9b6adc35-266a-478c-ae17-63a88705e8a9-logs\") pod \"9b6adc35-266a-478c-ae17-63a88705e8a9\" (UID: \"9b6adc35-266a-478c-ae17-63a88705e8a9\") " Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.834103 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9b6adc35-266a-478c-ae17-63a88705e8a9-public-tls-certs\") pod \"9b6adc35-266a-478c-ae17-63a88705e8a9\" (UID: \"9b6adc35-266a-478c-ae17-63a88705e8a9\") " Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.835114 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d71e65be-6778-41c8-8a34-eda639803764-logs" (OuterVolumeSpecName: "logs") pod "d71e65be-6778-41c8-8a34-eda639803764" (UID: 
"d71e65be-6778-41c8-8a34-eda639803764"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.836476 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9b6adc35-266a-478c-ae17-63a88705e8a9-logs" (OuterVolumeSpecName: "logs") pod "9b6adc35-266a-478c-ae17-63a88705e8a9" (UID: "9b6adc35-266a-478c-ae17-63a88705e8a9"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.844504 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b6adc35-266a-478c-ae17-63a88705e8a9-kube-api-access-kznpg" (OuterVolumeSpecName: "kube-api-access-kznpg") pod "9b6adc35-266a-478c-ae17-63a88705e8a9" (UID: "9b6adc35-266a-478c-ae17-63a88705e8a9"). InnerVolumeSpecName "kube-api-access-kznpg". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.844777 4949 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d71e65be-6778-41c8-8a34-eda639803764-logs\") on node \"crc\" DevicePath \"\"" Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.844851 4949 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9b6adc35-266a-478c-ae17-63a88705e8a9-logs\") on node \"crc\" DevicePath \"\"" Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.844864 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d71e65be-6778-41c8-8a34-eda639803764-kube-api-access-rggcj" (OuterVolumeSpecName: "kube-api-access-rggcj") pod "d71e65be-6778-41c8-8a34-eda639803764" (UID: "d71e65be-6778-41c8-8a34-eda639803764"). InnerVolumeSpecName "kube-api-access-rggcj". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.857043 4949 scope.go:117] "RemoveContainer" containerID="840fabca59d475ba7fab292b1bb8d5db13ac403f879d3dcf2ffa75ad8e70f2e4" Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.874813 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b6adc35-266a-478c-ae17-63a88705e8a9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9b6adc35-266a-478c-ae17-63a88705e8a9" (UID: "9b6adc35-266a-478c-ae17-63a88705e8a9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.890396 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d71e65be-6778-41c8-8a34-eda639803764-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d71e65be-6778-41c8-8a34-eda639803764" (UID: "d71e65be-6778-41c8-8a34-eda639803764"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.895607 4949 scope.go:117] "RemoveContainer" containerID="957d5f117b32011d3f96e5331daff6d25a8f2f625d676f5a798386ac07d29163" Feb 16 11:34:07 crc kubenswrapper[4949]: E0216 11:34:07.896142 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"957d5f117b32011d3f96e5331daff6d25a8f2f625d676f5a798386ac07d29163\": container with ID starting with 957d5f117b32011d3f96e5331daff6d25a8f2f625d676f5a798386ac07d29163 not found: ID does not exist" containerID="957d5f117b32011d3f96e5331daff6d25a8f2f625d676f5a798386ac07d29163" Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.896215 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"957d5f117b32011d3f96e5331daff6d25a8f2f625d676f5a798386ac07d29163"} err="failed to get container status \"957d5f117b32011d3f96e5331daff6d25a8f2f625d676f5a798386ac07d29163\": rpc error: code = NotFound desc = could not find container \"957d5f117b32011d3f96e5331daff6d25a8f2f625d676f5a798386ac07d29163\": container with ID starting with 957d5f117b32011d3f96e5331daff6d25a8f2f625d676f5a798386ac07d29163 not found: ID does not exist" Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.896255 4949 scope.go:117] "RemoveContainer" containerID="840fabca59d475ba7fab292b1bb8d5db13ac403f879d3dcf2ffa75ad8e70f2e4" Feb 16 11:34:07 crc kubenswrapper[4949]: E0216 11:34:07.897121 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"840fabca59d475ba7fab292b1bb8d5db13ac403f879d3dcf2ffa75ad8e70f2e4\": container with ID starting with 840fabca59d475ba7fab292b1bb8d5db13ac403f879d3dcf2ffa75ad8e70f2e4 not found: ID does not exist" containerID="840fabca59d475ba7fab292b1bb8d5db13ac403f879d3dcf2ffa75ad8e70f2e4" Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.897156 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"840fabca59d475ba7fab292b1bb8d5db13ac403f879d3dcf2ffa75ad8e70f2e4"} err="failed to get container status \"840fabca59d475ba7fab292b1bb8d5db13ac403f879d3dcf2ffa75ad8e70f2e4\": rpc error: code = NotFound desc = could not find container \"840fabca59d475ba7fab292b1bb8d5db13ac403f879d3dcf2ffa75ad8e70f2e4\": container with ID starting with 840fabca59d475ba7fab292b1bb8d5db13ac403f879d3dcf2ffa75ad8e70f2e4 not found: ID does not exist" Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.901941 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b6adc35-266a-478c-ae17-63a88705e8a9-config-data" (OuterVolumeSpecName: "config-data") pod "9b6adc35-266a-478c-ae17-63a88705e8a9" (UID: "9b6adc35-266a-478c-ae17-63a88705e8a9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.914381 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d71e65be-6778-41c8-8a34-eda639803764-config-data" (OuterVolumeSpecName: "config-data") pod "d71e65be-6778-41c8-8a34-eda639803764" (UID: "d71e65be-6778-41c8-8a34-eda639803764"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.919539 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d71e65be-6778-41c8-8a34-eda639803764-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "d71e65be-6778-41c8-8a34-eda639803764" (UID: "d71e65be-6778-41c8-8a34-eda639803764"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.930929 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b6adc35-266a-478c-ae17-63a88705e8a9-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "9b6adc35-266a-478c-ae17-63a88705e8a9" (UID: "9b6adc35-266a-478c-ae17-63a88705e8a9"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.947031 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b6adc35-266a-478c-ae17-63a88705e8a9-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "9b6adc35-266a-478c-ae17-63a88705e8a9" (UID: "9b6adc35-266a-478c-ae17-63a88705e8a9"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.947042 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d71e65be-6778-41c8-8a34-eda639803764-config-data\") on node \"crc\" DevicePath \"\"" Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.947091 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b6adc35-266a-478c-ae17-63a88705e8a9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.947105 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b6adc35-266a-478c-ae17-63a88705e8a9-config-data\") on node \"crc\" DevicePath \"\"" Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.947117 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kznpg\" (UniqueName: \"kubernetes.io/projected/9b6adc35-266a-478c-ae17-63a88705e8a9-kube-api-access-kznpg\") on node \"crc\" DevicePath \"\"" Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.947127 4949 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/d71e65be-6778-41c8-8a34-eda639803764-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.947136 4949 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9b6adc35-266a-478c-ae17-63a88705e8a9-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.947148 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d71e65be-6778-41c8-8a34-eda639803764-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:34:07 crc kubenswrapper[4949]: I0216 11:34:07.947157 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rggcj\" (UniqueName: 
\"kubernetes.io/projected/d71e65be-6778-41c8-8a34-eda639803764-kube-api-access-rggcj\") on node \"crc\" DevicePath \"\"" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.050835 4949 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9b6adc35-266a-478c-ae17-63a88705e8a9-public-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.094984 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.127673 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.139774 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Feb 16 11:34:08 crc kubenswrapper[4949]: E0216 11:34:08.140437 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d71e65be-6778-41c8-8a34-eda639803764" containerName="nova-metadata-metadata" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.140461 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="d71e65be-6778-41c8-8a34-eda639803764" containerName="nova-metadata-metadata" Feb 16 11:34:08 crc kubenswrapper[4949]: E0216 11:34:08.140509 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d71e65be-6778-41c8-8a34-eda639803764" containerName="nova-metadata-log" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.140519 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="d71e65be-6778-41c8-8a34-eda639803764" containerName="nova-metadata-log" Feb 16 11:34:08 crc kubenswrapper[4949]: E0216 11:34:08.140534 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a0bf28e-fc72-402a-b5e9-077eb5218110" containerName="nova-manage" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.140543 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a0bf28e-fc72-402a-b5e9-077eb5218110" containerName="nova-manage" Feb 16 11:34:08 crc kubenswrapper[4949]: E0216 11:34:08.140562 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b6adc35-266a-478c-ae17-63a88705e8a9" containerName="nova-api-api" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.140571 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b6adc35-266a-478c-ae17-63a88705e8a9" containerName="nova-api-api" Feb 16 11:34:08 crc kubenswrapper[4949]: E0216 11:34:08.140593 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b6adc35-266a-478c-ae17-63a88705e8a9" containerName="nova-api-log" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.140601 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b6adc35-266a-478c-ae17-63a88705e8a9" containerName="nova-api-log" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.140887 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="d71e65be-6778-41c8-8a34-eda639803764" containerName="nova-metadata-log" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.140916 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b6adc35-266a-478c-ae17-63a88705e8a9" containerName="nova-api-log" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.140930 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b6adc35-266a-478c-ae17-63a88705e8a9" containerName="nova-api-api" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.140942 4949 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="d71e65be-6778-41c8-8a34-eda639803764" containerName="nova-metadata-metadata" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.140952 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a0bf28e-fc72-402a-b5e9-077eb5218110" containerName="nova-manage" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.143263 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.147772 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.149267 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.149600 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.167723 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.208892 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.228162 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.243952 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.249425 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.251655 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.251772 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.255427 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4e78c7bd-f9e0-49da-83cb-e6bff985ad7d-public-tls-certs\") pod \"nova-api-0\" (UID: \"4e78c7bd-f9e0-49da-83cb-e6bff985ad7d\") " pod="openstack/nova-api-0" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.255484 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4e78c7bd-f9e0-49da-83cb-e6bff985ad7d-logs\") pod \"nova-api-0\" (UID: \"4e78c7bd-f9e0-49da-83cb-e6bff985ad7d\") " pod="openstack/nova-api-0" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.255643 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4e78c7bd-f9e0-49da-83cb-e6bff985ad7d-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"4e78c7bd-f9e0-49da-83cb-e6bff985ad7d\") " pod="openstack/nova-api-0" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.255709 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4e78c7bd-f9e0-49da-83cb-e6bff985ad7d-internal-tls-certs\") pod \"nova-api-0\" (UID: \"4e78c7bd-f9e0-49da-83cb-e6bff985ad7d\") " pod="openstack/nova-api-0" Feb 16 11:34:08 
crc kubenswrapper[4949]: I0216 11:34:08.255795 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4e78c7bd-f9e0-49da-83cb-e6bff985ad7d-config-data\") pod \"nova-api-0\" (UID: \"4e78c7bd-f9e0-49da-83cb-e6bff985ad7d\") " pod="openstack/nova-api-0" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.255956 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qpxn8\" (UniqueName: \"kubernetes.io/projected/4e78c7bd-f9e0-49da-83cb-e6bff985ad7d-kube-api-access-qpxn8\") pod \"nova-api-0\" (UID: \"4e78c7bd-f9e0-49da-83cb-e6bff985ad7d\") " pod="openstack/nova-api-0" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.267195 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.358386 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a3a859f-f5a1-4ca8-b71d-f526e24ac4b9-config-data\") pod \"nova-metadata-0\" (UID: \"4a3a859f-f5a1-4ca8-b71d-f526e24ac4b9\") " pod="openstack/nova-metadata-0" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.358469 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4e78c7bd-f9e0-49da-83cb-e6bff985ad7d-config-data\") pod \"nova-api-0\" (UID: \"4e78c7bd-f9e0-49da-83cb-e6bff985ad7d\") " pod="openstack/nova-api-0" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.358545 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4a3a859f-f5a1-4ca8-b71d-f526e24ac4b9-logs\") pod \"nova-metadata-0\" (UID: \"4a3a859f-f5a1-4ca8-b71d-f526e24ac4b9\") " pod="openstack/nova-metadata-0" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.358583 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tpchl\" (UniqueName: \"kubernetes.io/projected/4a3a859f-f5a1-4ca8-b71d-f526e24ac4b9-kube-api-access-tpchl\") pod \"nova-metadata-0\" (UID: \"4a3a859f-f5a1-4ca8-b71d-f526e24ac4b9\") " pod="openstack/nova-metadata-0" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.358634 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qpxn8\" (UniqueName: \"kubernetes.io/projected/4e78c7bd-f9e0-49da-83cb-e6bff985ad7d-kube-api-access-qpxn8\") pod \"nova-api-0\" (UID: \"4e78c7bd-f9e0-49da-83cb-e6bff985ad7d\") " pod="openstack/nova-api-0" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.358723 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4e78c7bd-f9e0-49da-83cb-e6bff985ad7d-public-tls-certs\") pod \"nova-api-0\" (UID: \"4e78c7bd-f9e0-49da-83cb-e6bff985ad7d\") " pod="openstack/nova-api-0" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.358750 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4e78c7bd-f9e0-49da-83cb-e6bff985ad7d-logs\") pod \"nova-api-0\" (UID: \"4e78c7bd-f9e0-49da-83cb-e6bff985ad7d\") " pod="openstack/nova-api-0" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.358769 4949 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a3a859f-f5a1-4ca8-b71d-f526e24ac4b9-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"4a3a859f-f5a1-4ca8-b71d-f526e24ac4b9\") " pod="openstack/nova-metadata-0" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.358842 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4e78c7bd-f9e0-49da-83cb-e6bff985ad7d-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"4e78c7bd-f9e0-49da-83cb-e6bff985ad7d\") " pod="openstack/nova-api-0" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.358861 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/4a3a859f-f5a1-4ca8-b71d-f526e24ac4b9-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"4a3a859f-f5a1-4ca8-b71d-f526e24ac4b9\") " pod="openstack/nova-metadata-0" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.358880 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4e78c7bd-f9e0-49da-83cb-e6bff985ad7d-internal-tls-certs\") pod \"nova-api-0\" (UID: \"4e78c7bd-f9e0-49da-83cb-e6bff985ad7d\") " pod="openstack/nova-api-0" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.360986 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4e78c7bd-f9e0-49da-83cb-e6bff985ad7d-logs\") pod \"nova-api-0\" (UID: \"4e78c7bd-f9e0-49da-83cb-e6bff985ad7d\") " pod="openstack/nova-api-0" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.364039 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4e78c7bd-f9e0-49da-83cb-e6bff985ad7d-internal-tls-certs\") pod \"nova-api-0\" (UID: \"4e78c7bd-f9e0-49da-83cb-e6bff985ad7d\") " pod="openstack/nova-api-0" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.364290 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4e78c7bd-f9e0-49da-83cb-e6bff985ad7d-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"4e78c7bd-f9e0-49da-83cb-e6bff985ad7d\") " pod="openstack/nova-api-0" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.364413 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4e78c7bd-f9e0-49da-83cb-e6bff985ad7d-public-tls-certs\") pod \"nova-api-0\" (UID: \"4e78c7bd-f9e0-49da-83cb-e6bff985ad7d\") " pod="openstack/nova-api-0" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.372226 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4e78c7bd-f9e0-49da-83cb-e6bff985ad7d-config-data\") pod \"nova-api-0\" (UID: \"4e78c7bd-f9e0-49da-83cb-e6bff985ad7d\") " pod="openstack/nova-api-0" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.376860 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qpxn8\" (UniqueName: \"kubernetes.io/projected/4e78c7bd-f9e0-49da-83cb-e6bff985ad7d-kube-api-access-qpxn8\") pod \"nova-api-0\" (UID: \"4e78c7bd-f9e0-49da-83cb-e6bff985ad7d\") " pod="openstack/nova-api-0" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.461588 4949 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tpchl\" (UniqueName: \"kubernetes.io/projected/4a3a859f-f5a1-4ca8-b71d-f526e24ac4b9-kube-api-access-tpchl\") pod \"nova-metadata-0\" (UID: \"4a3a859f-f5a1-4ca8-b71d-f526e24ac4b9\") " pod="openstack/nova-metadata-0" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.461817 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a3a859f-f5a1-4ca8-b71d-f526e24ac4b9-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"4a3a859f-f5a1-4ca8-b71d-f526e24ac4b9\") " pod="openstack/nova-metadata-0" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.461897 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/4a3a859f-f5a1-4ca8-b71d-f526e24ac4b9-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"4a3a859f-f5a1-4ca8-b71d-f526e24ac4b9\") " pod="openstack/nova-metadata-0" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.461955 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a3a859f-f5a1-4ca8-b71d-f526e24ac4b9-config-data\") pod \"nova-metadata-0\" (UID: \"4a3a859f-f5a1-4ca8-b71d-f526e24ac4b9\") " pod="openstack/nova-metadata-0" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.462054 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4a3a859f-f5a1-4ca8-b71d-f526e24ac4b9-logs\") pod \"nova-metadata-0\" (UID: \"4a3a859f-f5a1-4ca8-b71d-f526e24ac4b9\") " pod="openstack/nova-metadata-0" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.462568 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4a3a859f-f5a1-4ca8-b71d-f526e24ac4b9-logs\") pod \"nova-metadata-0\" (UID: \"4a3a859f-f5a1-4ca8-b71d-f526e24ac4b9\") " pod="openstack/nova-metadata-0" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.468368 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/4a3a859f-f5a1-4ca8-b71d-f526e24ac4b9-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"4a3a859f-f5a1-4ca8-b71d-f526e24ac4b9\") " pod="openstack/nova-metadata-0" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.468484 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a3a859f-f5a1-4ca8-b71d-f526e24ac4b9-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"4a3a859f-f5a1-4ca8-b71d-f526e24ac4b9\") " pod="openstack/nova-metadata-0" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.468728 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a3a859f-f5a1-4ca8-b71d-f526e24ac4b9-config-data\") pod \"nova-metadata-0\" (UID: \"4a3a859f-f5a1-4ca8-b71d-f526e24ac4b9\") " pod="openstack/nova-metadata-0" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.478722 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.487051 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tpchl\" (UniqueName: \"kubernetes.io/projected/4a3a859f-f5a1-4ca8-b71d-f526e24ac4b9-kube-api-access-tpchl\") pod \"nova-metadata-0\" (UID: \"4a3a859f-f5a1-4ca8-b71d-f526e24ac4b9\") " pod="openstack/nova-metadata-0" Feb 16 11:34:08 crc kubenswrapper[4949]: I0216 11:34:08.573314 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Feb 16 11:34:08 crc kubenswrapper[4949]: E0216 11:34:08.902254 4949 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of dc8b443557e7ef591dc5c9e3d9dd638603d3c9d9ac8122b1c87600a739d7f0f5 is running failed: container process not found" containerID="dc8b443557e7ef591dc5c9e3d9dd638603d3c9d9ac8122b1c87600a739d7f0f5" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Feb 16 11:34:08 crc kubenswrapper[4949]: E0216 11:34:08.902659 4949 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of dc8b443557e7ef591dc5c9e3d9dd638603d3c9d9ac8122b1c87600a739d7f0f5 is running failed: container process not found" containerID="dc8b443557e7ef591dc5c9e3d9dd638603d3c9d9ac8122b1c87600a739d7f0f5" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Feb 16 11:34:08 crc kubenswrapper[4949]: E0216 11:34:08.902880 4949 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of dc8b443557e7ef591dc5c9e3d9dd638603d3c9d9ac8122b1c87600a739d7f0f5 is running failed: container process not found" containerID="dc8b443557e7ef591dc5c9e3d9dd638603d3c9d9ac8122b1c87600a739d7f0f5" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Feb 16 11:34:08 crc kubenswrapper[4949]: E0216 11:34:08.902911 4949 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of dc8b443557e7ef591dc5c9e3d9dd638603d3c9d9ac8122b1c87600a739d7f0f5 is running failed: container process not found" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1" containerName="nova-scheduler-scheduler" Feb 16 11:34:09 crc kubenswrapper[4949]: I0216 11:34:09.078967 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Feb 16 11:34:09 crc kubenswrapper[4949]: I0216 11:34:09.173647 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Feb 16 11:34:09 crc kubenswrapper[4949]: W0216 11:34:09.234672 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4a3a859f_f5a1_4ca8_b71d_f526e24ac4b9.slice/crio-b1008ef9dccb0cc4d41a0109bd6f183fb9c731731f61e514768d9471b225d9ec WatchSource:0}: Error finding container b1008ef9dccb0cc4d41a0109bd6f183fb9c731731f61e514768d9471b225d9ec: Status 404 returned error can't find the container with id b1008ef9dccb0cc4d41a0109bd6f183fb9c731731f61e514768d9471b225d9ec Feb 16 11:34:09 crc kubenswrapper[4949]: I0216 11:34:09.258930 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b6adc35-266a-478c-ae17-63a88705e8a9" path="/var/lib/kubelet/pods/9b6adc35-266a-478c-ae17-63a88705e8a9/volumes" Feb 16 11:34:09 crc kubenswrapper[4949]: 
I0216 11:34:09.260725 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d71e65be-6778-41c8-8a34-eda639803764" path="/var/lib/kubelet/pods/d71e65be-6778-41c8-8a34-eda639803764/volumes" Feb 16 11:34:09 crc kubenswrapper[4949]: I0216 11:34:09.424820 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Feb 16 11:34:09 crc kubenswrapper[4949]: I0216 11:34:09.628441 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1-config-data\") pod \"f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1\" (UID: \"f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1\") " Feb 16 11:34:09 crc kubenswrapper[4949]: I0216 11:34:09.628539 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1-combined-ca-bundle\") pod \"f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1\" (UID: \"f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1\") " Feb 16 11:34:09 crc kubenswrapper[4949]: I0216 11:34:09.628909 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tftfv\" (UniqueName: \"kubernetes.io/projected/f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1-kube-api-access-tftfv\") pod \"f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1\" (UID: \"f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1\") " Feb 16 11:34:09 crc kubenswrapper[4949]: I0216 11:34:09.649437 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1-kube-api-access-tftfv" (OuterVolumeSpecName: "kube-api-access-tftfv") pod "f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1" (UID: "f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1"). InnerVolumeSpecName "kube-api-access-tftfv". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:34:09 crc kubenswrapper[4949]: I0216 11:34:09.699528 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1-config-data" (OuterVolumeSpecName: "config-data") pod "f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1" (UID: "f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:34:09 crc kubenswrapper[4949]: I0216 11:34:09.728321 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1" (UID: "f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:34:09 crc kubenswrapper[4949]: I0216 11:34:09.733522 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tftfv\" (UniqueName: \"kubernetes.io/projected/f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1-kube-api-access-tftfv\") on node \"crc\" DevicePath \"\"" Feb 16 11:34:09 crc kubenswrapper[4949]: I0216 11:34:09.733579 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1-config-data\") on node \"crc\" DevicePath \"\"" Feb 16 11:34:09 crc kubenswrapper[4949]: I0216 11:34:09.733595 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:34:09 crc kubenswrapper[4949]: I0216 11:34:09.805356 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"4a3a859f-f5a1-4ca8-b71d-f526e24ac4b9","Type":"ContainerStarted","Data":"448dc9c6652d9ea9672d0c1f872dbc8d0c0b6b514729f06b4c1e7dae3b005c3e"} Feb 16 11:34:09 crc kubenswrapper[4949]: I0216 11:34:09.805710 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"4a3a859f-f5a1-4ca8-b71d-f526e24ac4b9","Type":"ContainerStarted","Data":"b1008ef9dccb0cc4d41a0109bd6f183fb9c731731f61e514768d9471b225d9ec"} Feb 16 11:34:09 crc kubenswrapper[4949]: I0216 11:34:09.806727 4949 generic.go:334] "Generic (PLEG): container finished" podID="f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1" containerID="dc8b443557e7ef591dc5c9e3d9dd638603d3c9d9ac8122b1c87600a739d7f0f5" exitCode=0 Feb 16 11:34:09 crc kubenswrapper[4949]: I0216 11:34:09.806790 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1","Type":"ContainerDied","Data":"dc8b443557e7ef591dc5c9e3d9dd638603d3c9d9ac8122b1c87600a739d7f0f5"} Feb 16 11:34:09 crc kubenswrapper[4949]: I0216 11:34:09.806811 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1","Type":"ContainerDied","Data":"0643148a9bf6032e6e5cbf28b40b7910eb2945ee96944cfa56446487c7afc011"} Feb 16 11:34:09 crc kubenswrapper[4949]: I0216 11:34:09.806833 4949 scope.go:117] "RemoveContainer" containerID="dc8b443557e7ef591dc5c9e3d9dd638603d3c9d9ac8122b1c87600a739d7f0f5" Feb 16 11:34:09 crc kubenswrapper[4949]: I0216 11:34:09.806867 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Feb 16 11:34:09 crc kubenswrapper[4949]: I0216 11:34:09.812615 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4e78c7bd-f9e0-49da-83cb-e6bff985ad7d","Type":"ContainerStarted","Data":"20b69e4552dd8b84e57d7c6d9e331987e695700c07303ef14818f99c73d51cc4"} Feb 16 11:34:09 crc kubenswrapper[4949]: I0216 11:34:09.812671 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4e78c7bd-f9e0-49da-83cb-e6bff985ad7d","Type":"ContainerStarted","Data":"f0d5f52a7b435b6fb13721649708618d028b239752d04349fa40f08e84ba5a1c"} Feb 16 11:34:09 crc kubenswrapper[4949]: I0216 11:34:09.812681 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4e78c7bd-f9e0-49da-83cb-e6bff985ad7d","Type":"ContainerStarted","Data":"15ad5b8bb6b33b5474e91867c6357163361838878f5de9180a05779588474128"} Feb 16 11:34:09 crc kubenswrapper[4949]: I0216 11:34:09.837445 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=1.837422046 podStartE2EDuration="1.837422046s" podCreationTimestamp="2026-02-16 11:34:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:34:09.836441958 +0000 UTC m=+1639.465776133" watchObservedRunningTime="2026-02-16 11:34:09.837422046 +0000 UTC m=+1639.466756211" Feb 16 11:34:09 crc kubenswrapper[4949]: I0216 11:34:09.847577 4949 scope.go:117] "RemoveContainer" containerID="dc8b443557e7ef591dc5c9e3d9dd638603d3c9d9ac8122b1c87600a739d7f0f5" Feb 16 11:34:09 crc kubenswrapper[4949]: E0216 11:34:09.849248 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dc8b443557e7ef591dc5c9e3d9dd638603d3c9d9ac8122b1c87600a739d7f0f5\": container with ID starting with dc8b443557e7ef591dc5c9e3d9dd638603d3c9d9ac8122b1c87600a739d7f0f5 not found: ID does not exist" containerID="dc8b443557e7ef591dc5c9e3d9dd638603d3c9d9ac8122b1c87600a739d7f0f5" Feb 16 11:34:09 crc kubenswrapper[4949]: I0216 11:34:09.849308 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dc8b443557e7ef591dc5c9e3d9dd638603d3c9d9ac8122b1c87600a739d7f0f5"} err="failed to get container status \"dc8b443557e7ef591dc5c9e3d9dd638603d3c9d9ac8122b1c87600a739d7f0f5\": rpc error: code = NotFound desc = could not find container \"dc8b443557e7ef591dc5c9e3d9dd638603d3c9d9ac8122b1c87600a739d7f0f5\": container with ID starting with dc8b443557e7ef591dc5c9e3d9dd638603d3c9d9ac8122b1c87600a739d7f0f5 not found: ID does not exist" Feb 16 11:34:09 crc kubenswrapper[4949]: I0216 11:34:09.875463 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Feb 16 11:34:09 crc kubenswrapper[4949]: I0216 11:34:09.898129 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Feb 16 11:34:09 crc kubenswrapper[4949]: I0216 11:34:09.918247 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Feb 16 11:34:09 crc kubenswrapper[4949]: E0216 11:34:09.918925 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1" containerName="nova-scheduler-scheduler" Feb 16 11:34:09 crc kubenswrapper[4949]: I0216 11:34:09.918943 4949 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1" containerName="nova-scheduler-scheduler" Feb 16 11:34:09 crc kubenswrapper[4949]: I0216 11:34:09.919215 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1" containerName="nova-scheduler-scheduler" Feb 16 11:34:09 crc kubenswrapper[4949]: I0216 11:34:09.920207 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Feb 16 11:34:09 crc kubenswrapper[4949]: I0216 11:34:09.925905 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Feb 16 11:34:09 crc kubenswrapper[4949]: I0216 11:34:09.981907 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Feb 16 11:34:10 crc kubenswrapper[4949]: I0216 11:34:10.053314 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df3c5421-aaa8-41cb-9e45-fb91a87c89e9-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"df3c5421-aaa8-41cb-9e45-fb91a87c89e9\") " pod="openstack/nova-scheduler-0" Feb 16 11:34:10 crc kubenswrapper[4949]: I0216 11:34:10.053460 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4hx8b\" (UniqueName: \"kubernetes.io/projected/df3c5421-aaa8-41cb-9e45-fb91a87c89e9-kube-api-access-4hx8b\") pod \"nova-scheduler-0\" (UID: \"df3c5421-aaa8-41cb-9e45-fb91a87c89e9\") " pod="openstack/nova-scheduler-0" Feb 16 11:34:10 crc kubenswrapper[4949]: I0216 11:34:10.053674 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/df3c5421-aaa8-41cb-9e45-fb91a87c89e9-config-data\") pod \"nova-scheduler-0\" (UID: \"df3c5421-aaa8-41cb-9e45-fb91a87c89e9\") " pod="openstack/nova-scheduler-0" Feb 16 11:34:10 crc kubenswrapper[4949]: I0216 11:34:10.156422 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df3c5421-aaa8-41cb-9e45-fb91a87c89e9-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"df3c5421-aaa8-41cb-9e45-fb91a87c89e9\") " pod="openstack/nova-scheduler-0" Feb 16 11:34:10 crc kubenswrapper[4949]: I0216 11:34:10.156519 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4hx8b\" (UniqueName: \"kubernetes.io/projected/df3c5421-aaa8-41cb-9e45-fb91a87c89e9-kube-api-access-4hx8b\") pod \"nova-scheduler-0\" (UID: \"df3c5421-aaa8-41cb-9e45-fb91a87c89e9\") " pod="openstack/nova-scheduler-0" Feb 16 11:34:10 crc kubenswrapper[4949]: I0216 11:34:10.156587 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/df3c5421-aaa8-41cb-9e45-fb91a87c89e9-config-data\") pod \"nova-scheduler-0\" (UID: \"df3c5421-aaa8-41cb-9e45-fb91a87c89e9\") " pod="openstack/nova-scheduler-0" Feb 16 11:34:10 crc kubenswrapper[4949]: I0216 11:34:10.162964 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df3c5421-aaa8-41cb-9e45-fb91a87c89e9-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"df3c5421-aaa8-41cb-9e45-fb91a87c89e9\") " pod="openstack/nova-scheduler-0" Feb 16 11:34:10 crc kubenswrapper[4949]: I0216 11:34:10.163980 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"config-data\" (UniqueName: \"kubernetes.io/secret/df3c5421-aaa8-41cb-9e45-fb91a87c89e9-config-data\") pod \"nova-scheduler-0\" (UID: \"df3c5421-aaa8-41cb-9e45-fb91a87c89e9\") " pod="openstack/nova-scheduler-0" Feb 16 11:34:10 crc kubenswrapper[4949]: I0216 11:34:10.184962 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4hx8b\" (UniqueName: \"kubernetes.io/projected/df3c5421-aaa8-41cb-9e45-fb91a87c89e9-kube-api-access-4hx8b\") pod \"nova-scheduler-0\" (UID: \"df3c5421-aaa8-41cb-9e45-fb91a87c89e9\") " pod="openstack/nova-scheduler-0" Feb 16 11:34:10 crc kubenswrapper[4949]: I0216 11:34:10.257742 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Feb 16 11:34:10 crc kubenswrapper[4949]: W0216 11:34:10.755642 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddf3c5421_aaa8_41cb_9e45_fb91a87c89e9.slice/crio-5cfd70f49d9b12b4f2a0ea3a633d831845b0fe2af85c6941ea7fb39fe799ed31 WatchSource:0}: Error finding container 5cfd70f49d9b12b4f2a0ea3a633d831845b0fe2af85c6941ea7fb39fe799ed31: Status 404 returned error can't find the container with id 5cfd70f49d9b12b4f2a0ea3a633d831845b0fe2af85c6941ea7fb39fe799ed31 Feb 16 11:34:10 crc kubenswrapper[4949]: I0216 11:34:10.756332 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Feb 16 11:34:10 crc kubenswrapper[4949]: I0216 11:34:10.833019 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"4a3a859f-f5a1-4ca8-b71d-f526e24ac4b9","Type":"ContainerStarted","Data":"ff7a0fc00cd368c29d46fd3bd1f8fdef7ee0d94c12e62bde2aef7e73df3570e3"} Feb 16 11:34:10 crc kubenswrapper[4949]: I0216 11:34:10.838928 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"df3c5421-aaa8-41cb-9e45-fb91a87c89e9","Type":"ContainerStarted","Data":"5cfd70f49d9b12b4f2a0ea3a633d831845b0fe2af85c6941ea7fb39fe799ed31"} Feb 16 11:34:10 crc kubenswrapper[4949]: I0216 11:34:10.856126 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.8561080580000002 podStartE2EDuration="2.856108058s" podCreationTimestamp="2026-02-16 11:34:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:34:10.855600204 +0000 UTC m=+1640.484934389" watchObservedRunningTime="2026-02-16 11:34:10.856108058 +0000 UTC m=+1640.485442223" Feb 16 11:34:11 crc kubenswrapper[4949]: I0216 11:34:11.248721 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1" path="/var/lib/kubelet/pods/f25f06e6-4e81-469e-b0e8-0cf9e75c5ab1/volumes" Feb 16 11:34:11 crc kubenswrapper[4949]: I0216 11:34:11.855160 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"df3c5421-aaa8-41cb-9e45-fb91a87c89e9","Type":"ContainerStarted","Data":"eff1edd94e6d65df5450dd68dae01e362b7bf1a7ea15c9bfaf109defb4a50d46"} Feb 16 11:34:11 crc kubenswrapper[4949]: I0216 11:34:11.879409 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.879391312 podStartE2EDuration="2.879391312s" podCreationTimestamp="2026-02-16 11:34:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 
00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:34:11.875532761 +0000 UTC m=+1641.504866936" watchObservedRunningTime="2026-02-16 11:34:11.879391312 +0000 UTC m=+1641.508725477" Feb 16 11:34:13 crc kubenswrapper[4949]: I0216 11:34:13.575000 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Feb 16 11:34:13 crc kubenswrapper[4949]: I0216 11:34:13.576729 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Feb 16 11:34:14 crc kubenswrapper[4949]: I0216 11:34:14.896798 4949 generic.go:334] "Generic (PLEG): container finished" podID="c501b89b-60ac-4275-a573-9324cc865c14" containerID="6323305bdbf51c6e4bffe689102365d8f2dacc9da2e1d41a6a9c8c8286525c89" exitCode=137 Feb 16 11:34:14 crc kubenswrapper[4949]: I0216 11:34:14.897154 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"c501b89b-60ac-4275-a573-9324cc865c14","Type":"ContainerDied","Data":"6323305bdbf51c6e4bffe689102365d8f2dacc9da2e1d41a6a9c8c8286525c89"} Feb 16 11:34:15 crc kubenswrapper[4949]: I0216 11:34:15.121331 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Feb 16 11:34:15 crc kubenswrapper[4949]: I0216 11:34:15.258944 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Feb 16 11:34:15 crc kubenswrapper[4949]: I0216 11:34:15.312586 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c501b89b-60ac-4275-a573-9324cc865c14-scripts\") pod \"c501b89b-60ac-4275-a573-9324cc865c14\" (UID: \"c501b89b-60ac-4275-a573-9324cc865c14\") " Feb 16 11:34:15 crc kubenswrapper[4949]: I0216 11:34:15.313639 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c501b89b-60ac-4275-a573-9324cc865c14-combined-ca-bundle\") pod \"c501b89b-60ac-4275-a573-9324cc865c14\" (UID: \"c501b89b-60ac-4275-a573-9324cc865c14\") " Feb 16 11:34:15 crc kubenswrapper[4949]: I0216 11:34:15.313735 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c501b89b-60ac-4275-a573-9324cc865c14-config-data\") pod \"c501b89b-60ac-4275-a573-9324cc865c14\" (UID: \"c501b89b-60ac-4275-a573-9324cc865c14\") " Feb 16 11:34:15 crc kubenswrapper[4949]: I0216 11:34:15.313785 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hgdjb\" (UniqueName: \"kubernetes.io/projected/c501b89b-60ac-4275-a573-9324cc865c14-kube-api-access-hgdjb\") pod \"c501b89b-60ac-4275-a573-9324cc865c14\" (UID: \"c501b89b-60ac-4275-a573-9324cc865c14\") " Feb 16 11:34:15 crc kubenswrapper[4949]: I0216 11:34:15.317822 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c501b89b-60ac-4275-a573-9324cc865c14-scripts" (OuterVolumeSpecName: "scripts") pod "c501b89b-60ac-4275-a573-9324cc865c14" (UID: "c501b89b-60ac-4275-a573-9324cc865c14"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:34:15 crc kubenswrapper[4949]: I0216 11:34:15.319104 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c501b89b-60ac-4275-a573-9324cc865c14-kube-api-access-hgdjb" (OuterVolumeSpecName: "kube-api-access-hgdjb") pod "c501b89b-60ac-4275-a573-9324cc865c14" (UID: "c501b89b-60ac-4275-a573-9324cc865c14"). InnerVolumeSpecName "kube-api-access-hgdjb". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:34:15 crc kubenswrapper[4949]: I0216 11:34:15.419094 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hgdjb\" (UniqueName: \"kubernetes.io/projected/c501b89b-60ac-4275-a573-9324cc865c14-kube-api-access-hgdjb\") on node \"crc\" DevicePath \"\"" Feb 16 11:34:15 crc kubenswrapper[4949]: I0216 11:34:15.419150 4949 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c501b89b-60ac-4275-a573-9324cc865c14-scripts\") on node \"crc\" DevicePath \"\"" Feb 16 11:34:15 crc kubenswrapper[4949]: I0216 11:34:15.459604 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c501b89b-60ac-4275-a573-9324cc865c14-config-data" (OuterVolumeSpecName: "config-data") pod "c501b89b-60ac-4275-a573-9324cc865c14" (UID: "c501b89b-60ac-4275-a573-9324cc865c14"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:34:15 crc kubenswrapper[4949]: I0216 11:34:15.462550 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c501b89b-60ac-4275-a573-9324cc865c14-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c501b89b-60ac-4275-a573-9324cc865c14" (UID: "c501b89b-60ac-4275-a573-9324cc865c14"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:34:15 crc kubenswrapper[4949]: I0216 11:34:15.521898 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c501b89b-60ac-4275-a573-9324cc865c14-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:34:15 crc kubenswrapper[4949]: I0216 11:34:15.521951 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c501b89b-60ac-4275-a573-9324cc865c14-config-data\") on node \"crc\" DevicePath \"\"" Feb 16 11:34:15 crc kubenswrapper[4949]: I0216 11:34:15.912089 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"c501b89b-60ac-4275-a573-9324cc865c14","Type":"ContainerDied","Data":"e799723a31f3e5ba4736bbc253650473c5e02618cf48a74ebbae5b6d7ac1cc91"} Feb 16 11:34:15 crc kubenswrapper[4949]: I0216 11:34:15.912995 4949 scope.go:117] "RemoveContainer" containerID="6323305bdbf51c6e4bffe689102365d8f2dacc9da2e1d41a6a9c8c8286525c89" Feb 16 11:34:15 crc kubenswrapper[4949]: I0216 11:34:15.912191 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Feb 16 11:34:15 crc kubenswrapper[4949]: I0216 11:34:15.954077 4949 scope.go:117] "RemoveContainer" containerID="cca2d100a096482e03e52d50436e1239e2707b14c22973e02cb2c6c8524d0a89" Feb 16 11:34:15 crc kubenswrapper[4949]: I0216 11:34:15.960411 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"] Feb 16 11:34:15 crc kubenswrapper[4949]: I0216 11:34:15.992290 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-0"] Feb 16 11:34:16 crc kubenswrapper[4949]: I0216 11:34:16.005598 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"] Feb 16 11:34:16 crc kubenswrapper[4949]: E0216 11:34:16.006368 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c501b89b-60ac-4275-a573-9324cc865c14" containerName="aodh-evaluator" Feb 16 11:34:16 crc kubenswrapper[4949]: I0216 11:34:16.006394 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="c501b89b-60ac-4275-a573-9324cc865c14" containerName="aodh-evaluator" Feb 16 11:34:16 crc kubenswrapper[4949]: E0216 11:34:16.006430 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c501b89b-60ac-4275-a573-9324cc865c14" containerName="aodh-listener" Feb 16 11:34:16 crc kubenswrapper[4949]: I0216 11:34:16.006440 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="c501b89b-60ac-4275-a573-9324cc865c14" containerName="aodh-listener" Feb 16 11:34:16 crc kubenswrapper[4949]: E0216 11:34:16.006451 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c501b89b-60ac-4275-a573-9324cc865c14" containerName="aodh-notifier" Feb 16 11:34:16 crc kubenswrapper[4949]: I0216 11:34:16.006460 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="c501b89b-60ac-4275-a573-9324cc865c14" containerName="aodh-notifier" Feb 16 11:34:16 crc kubenswrapper[4949]: E0216 11:34:16.006501 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c501b89b-60ac-4275-a573-9324cc865c14" containerName="aodh-api" Feb 16 11:34:16 crc kubenswrapper[4949]: I0216 11:34:16.006509 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="c501b89b-60ac-4275-a573-9324cc865c14" containerName="aodh-api" Feb 16 11:34:16 crc kubenswrapper[4949]: I0216 11:34:16.006800 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="c501b89b-60ac-4275-a573-9324cc865c14" containerName="aodh-evaluator" Feb 16 11:34:16 crc kubenswrapper[4949]: I0216 11:34:16.006829 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="c501b89b-60ac-4275-a573-9324cc865c14" containerName="aodh-listener" Feb 16 11:34:16 crc kubenswrapper[4949]: I0216 11:34:16.006854 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="c501b89b-60ac-4275-a573-9324cc865c14" containerName="aodh-api" Feb 16 11:34:16 crc kubenswrapper[4949]: I0216 11:34:16.006868 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="c501b89b-60ac-4275-a573-9324cc865c14" containerName="aodh-notifier" Feb 16 11:34:16 crc kubenswrapper[4949]: I0216 11:34:16.010294 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Feb 16 11:34:16 crc kubenswrapper[4949]: I0216 11:34:16.012519 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-internal-svc" Feb 16 11:34:16 crc kubenswrapper[4949]: I0216 11:34:16.012592 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-f89fs" Feb 16 11:34:16 crc kubenswrapper[4949]: I0216 11:34:16.012884 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-public-svc" Feb 16 11:34:16 crc kubenswrapper[4949]: I0216 11:34:16.012898 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Feb 16 11:34:16 crc kubenswrapper[4949]: I0216 11:34:16.013120 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Feb 16 11:34:16 crc kubenswrapper[4949]: I0216 11:34:16.013307 4949 scope.go:117] "RemoveContainer" containerID="69df7ae54182679bceaad103c14b20aeba4419f42ac27f254a538d08db822eea" Feb 16 11:34:16 crc kubenswrapper[4949]: I0216 11:34:16.032924 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Feb 16 11:34:16 crc kubenswrapper[4949]: I0216 11:34:16.059675 4949 scope.go:117] "RemoveContainer" containerID="77ae50adcc0d1a0e2f90ac7e818e676f9f2caa443f5d04455b8d3e74fe4a096b" Feb 16 11:34:16 crc kubenswrapper[4949]: I0216 11:34:16.140632 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/46afdda5-3045-4445-bf9a-d040c4c9eac3-config-data\") pod \"aodh-0\" (UID: \"46afdda5-3045-4445-bf9a-d040c4c9eac3\") " pod="openstack/aodh-0" Feb 16 11:34:16 crc kubenswrapper[4949]: I0216 11:34:16.140717 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/46afdda5-3045-4445-bf9a-d040c4c9eac3-scripts\") pod \"aodh-0\" (UID: \"46afdda5-3045-4445-bf9a-d040c4c9eac3\") " pod="openstack/aodh-0" Feb 16 11:34:16 crc kubenswrapper[4949]: I0216 11:34:16.140743 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/46afdda5-3045-4445-bf9a-d040c4c9eac3-combined-ca-bundle\") pod \"aodh-0\" (UID: \"46afdda5-3045-4445-bf9a-d040c4c9eac3\") " pod="openstack/aodh-0" Feb 16 11:34:16 crc kubenswrapper[4949]: I0216 11:34:16.140782 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z9cvn\" (UniqueName: \"kubernetes.io/projected/46afdda5-3045-4445-bf9a-d040c4c9eac3-kube-api-access-z9cvn\") pod \"aodh-0\" (UID: \"46afdda5-3045-4445-bf9a-d040c4c9eac3\") " pod="openstack/aodh-0" Feb 16 11:34:16 crc kubenswrapper[4949]: I0216 11:34:16.141002 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/46afdda5-3045-4445-bf9a-d040c4c9eac3-internal-tls-certs\") pod \"aodh-0\" (UID: \"46afdda5-3045-4445-bf9a-d040c4c9eac3\") " pod="openstack/aodh-0" Feb 16 11:34:16 crc kubenswrapper[4949]: I0216 11:34:16.141034 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/46afdda5-3045-4445-bf9a-d040c4c9eac3-public-tls-certs\") pod \"aodh-0\" (UID: \"46afdda5-3045-4445-bf9a-d040c4c9eac3\") " 
pod="openstack/aodh-0" Feb 16 11:34:16 crc kubenswrapper[4949]: I0216 11:34:16.245897 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/46afdda5-3045-4445-bf9a-d040c4c9eac3-internal-tls-certs\") pod \"aodh-0\" (UID: \"46afdda5-3045-4445-bf9a-d040c4c9eac3\") " pod="openstack/aodh-0" Feb 16 11:34:16 crc kubenswrapper[4949]: I0216 11:34:16.245980 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/46afdda5-3045-4445-bf9a-d040c4c9eac3-public-tls-certs\") pod \"aodh-0\" (UID: \"46afdda5-3045-4445-bf9a-d040c4c9eac3\") " pod="openstack/aodh-0" Feb 16 11:34:16 crc kubenswrapper[4949]: I0216 11:34:16.246086 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/46afdda5-3045-4445-bf9a-d040c4c9eac3-config-data\") pod \"aodh-0\" (UID: \"46afdda5-3045-4445-bf9a-d040c4c9eac3\") " pod="openstack/aodh-0" Feb 16 11:34:16 crc kubenswrapper[4949]: I0216 11:34:16.246192 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/46afdda5-3045-4445-bf9a-d040c4c9eac3-scripts\") pod \"aodh-0\" (UID: \"46afdda5-3045-4445-bf9a-d040c4c9eac3\") " pod="openstack/aodh-0" Feb 16 11:34:16 crc kubenswrapper[4949]: I0216 11:34:16.246212 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/46afdda5-3045-4445-bf9a-d040c4c9eac3-combined-ca-bundle\") pod \"aodh-0\" (UID: \"46afdda5-3045-4445-bf9a-d040c4c9eac3\") " pod="openstack/aodh-0" Feb 16 11:34:16 crc kubenswrapper[4949]: I0216 11:34:16.246250 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z9cvn\" (UniqueName: \"kubernetes.io/projected/46afdda5-3045-4445-bf9a-d040c4c9eac3-kube-api-access-z9cvn\") pod \"aodh-0\" (UID: \"46afdda5-3045-4445-bf9a-d040c4c9eac3\") " pod="openstack/aodh-0" Feb 16 11:34:16 crc kubenswrapper[4949]: I0216 11:34:16.251530 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/46afdda5-3045-4445-bf9a-d040c4c9eac3-combined-ca-bundle\") pod \"aodh-0\" (UID: \"46afdda5-3045-4445-bf9a-d040c4c9eac3\") " pod="openstack/aodh-0" Feb 16 11:34:16 crc kubenswrapper[4949]: I0216 11:34:16.252109 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/46afdda5-3045-4445-bf9a-d040c4c9eac3-config-data\") pod \"aodh-0\" (UID: \"46afdda5-3045-4445-bf9a-d040c4c9eac3\") " pod="openstack/aodh-0" Feb 16 11:34:16 crc kubenswrapper[4949]: I0216 11:34:16.253749 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/46afdda5-3045-4445-bf9a-d040c4c9eac3-internal-tls-certs\") pod \"aodh-0\" (UID: \"46afdda5-3045-4445-bf9a-d040c4c9eac3\") " pod="openstack/aodh-0" Feb 16 11:34:16 crc kubenswrapper[4949]: I0216 11:34:16.259537 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/46afdda5-3045-4445-bf9a-d040c4c9eac3-scripts\") pod \"aodh-0\" (UID: \"46afdda5-3045-4445-bf9a-d040c4c9eac3\") " pod="openstack/aodh-0" Feb 16 11:34:16 crc kubenswrapper[4949]: I0216 11:34:16.265327 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/46afdda5-3045-4445-bf9a-d040c4c9eac3-public-tls-certs\") pod \"aodh-0\" (UID: \"46afdda5-3045-4445-bf9a-d040c4c9eac3\") " pod="openstack/aodh-0" Feb 16 11:34:16 crc kubenswrapper[4949]: I0216 11:34:16.266879 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z9cvn\" (UniqueName: \"kubernetes.io/projected/46afdda5-3045-4445-bf9a-d040c4c9eac3-kube-api-access-z9cvn\") pod \"aodh-0\" (UID: \"46afdda5-3045-4445-bf9a-d040c4c9eac3\") " pod="openstack/aodh-0" Feb 16 11:34:16 crc kubenswrapper[4949]: I0216 11:34:16.362029 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Feb 16 11:34:16 crc kubenswrapper[4949]: I0216 11:34:16.790509 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Feb 16 11:34:16 crc kubenswrapper[4949]: I0216 11:34:16.900244 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Feb 16 11:34:16 crc kubenswrapper[4949]: I0216 11:34:16.905786 4949 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 16 11:34:16 crc kubenswrapper[4949]: I0216 11:34:16.930037 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"46afdda5-3045-4445-bf9a-d040c4c9eac3","Type":"ContainerStarted","Data":"7d91a58b42e37c7f4e1de3fb0f9cbf9735be928230d8bc36f1520cce78c08d7d"} Feb 16 11:34:17 crc kubenswrapper[4949]: I0216 11:34:17.250503 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c501b89b-60ac-4275-a573-9324cc865c14" path="/var/lib/kubelet/pods/c501b89b-60ac-4275-a573-9324cc865c14/volumes" Feb 16 11:34:17 crc kubenswrapper[4949]: I0216 11:34:17.946245 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"46afdda5-3045-4445-bf9a-d040c4c9eac3","Type":"ContainerStarted","Data":"6269e68dbdfc97bc681ca1575359ac5ae94d22d99d1d7b5cb4a4eafd4223abcb"} Feb 16 11:34:18 crc kubenswrapper[4949]: I0216 11:34:18.480608 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Feb 16 11:34:18 crc kubenswrapper[4949]: I0216 11:34:18.480680 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Feb 16 11:34:18 crc kubenswrapper[4949]: I0216 11:34:18.575448 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Feb 16 11:34:18 crc kubenswrapper[4949]: I0216 11:34:18.575555 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Feb 16 11:34:19 crc kubenswrapper[4949]: I0216 11:34:19.500603 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="4e78c7bd-f9e0-49da-83cb-e6bff985ad7d" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.1.6:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Feb 16 11:34:19 crc kubenswrapper[4949]: I0216 11:34:19.502501 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="4e78c7bd-f9e0-49da-83cb-e6bff985ad7d" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.1.6:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Feb 16 11:34:19 crc kubenswrapper[4949]: I0216 11:34:19.589530 4949 prober.go:107] "Probe failed" probeType="Startup" 
pod="openstack/nova-metadata-0" podUID="4a3a859f-f5a1-4ca8-b71d-f526e24ac4b9" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.1.7:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Feb 16 11:34:19 crc kubenswrapper[4949]: I0216 11:34:19.589573 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="4a3a859f-f5a1-4ca8-b71d-f526e24ac4b9" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.1.7:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Feb 16 11:34:19 crc kubenswrapper[4949]: I0216 11:34:19.970494 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"46afdda5-3045-4445-bf9a-d040c4c9eac3","Type":"ContainerStarted","Data":"898b1fe98e6f981f967c5fc90d56e53a0b8f3881d8565228cd5ea19d3443e182"} Feb 16 11:34:20 crc kubenswrapper[4949]: I0216 11:34:20.236875 4949 scope.go:117] "RemoveContainer" containerID="fcc25fdabb7245fb2b21fe157daaa0479c92c3c1e231adb091c93ee7fe6b8437" Feb 16 11:34:20 crc kubenswrapper[4949]: E0216 11:34:20.237640 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:34:20 crc kubenswrapper[4949]: I0216 11:34:20.258481 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Feb 16 11:34:20 crc kubenswrapper[4949]: I0216 11:34:20.303392 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Feb 16 11:34:20 crc kubenswrapper[4949]: I0216 11:34:20.987366 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"46afdda5-3045-4445-bf9a-d040c4c9eac3","Type":"ContainerStarted","Data":"502fbc4195c36b6d7833594e71f32824a294bb60a091981c98b8dfb3eb8cbb39"} Feb 16 11:34:21 crc kubenswrapper[4949]: I0216 11:34:21.028372 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Feb 16 11:34:22 crc kubenswrapper[4949]: I0216 11:34:22.820714 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Feb 16 11:34:22 crc kubenswrapper[4949]: I0216 11:34:22.823660 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="f89f3c3c-f6cb-4d3e-8950-fe35b2bfcc41" containerName="kube-state-metrics" containerID="cri-o://04cbb55a710e1af3150d6facd8083abe6253ef0aea8c224eef19eef55a9ab0ed" gracePeriod=30 Feb 16 11:34:22 crc kubenswrapper[4949]: I0216 11:34:22.981965 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-0"] Feb 16 11:34:22 crc kubenswrapper[4949]: I0216 11:34:22.982289 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/mysqld-exporter-0" podUID="bbc90277-5d42-4c6c-a24c-d06f066d4be5" containerName="mysqld-exporter" containerID="cri-o://916eb3462c426bff05c3868b91bd0c2f0f33e4ebdd77dd91f8983587590944cc" gracePeriod=30 Feb 16 11:34:23 crc kubenswrapper[4949]: I0216 11:34:23.035875 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/aodh-0" event={"ID":"46afdda5-3045-4445-bf9a-d040c4c9eac3","Type":"ContainerStarted","Data":"fd3acfe2943728c16e6a7820ef233b2ea373d4d1dcbe47bc56818b46c8cf8633"} Feb 16 11:34:23 crc kubenswrapper[4949]: I0216 11:34:23.038579 4949 generic.go:334] "Generic (PLEG): container finished" podID="f89f3c3c-f6cb-4d3e-8950-fe35b2bfcc41" containerID="04cbb55a710e1af3150d6facd8083abe6253ef0aea8c224eef19eef55a9ab0ed" exitCode=2 Feb 16 11:34:23 crc kubenswrapper[4949]: I0216 11:34:23.038628 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"f89f3c3c-f6cb-4d3e-8950-fe35b2bfcc41","Type":"ContainerDied","Data":"04cbb55a710e1af3150d6facd8083abe6253ef0aea8c224eef19eef55a9ab0ed"} Feb 16 11:34:24 crc kubenswrapper[4949]: I0216 11:34:24.099648 4949 generic.go:334] "Generic (PLEG): container finished" podID="bbc90277-5d42-4c6c-a24c-d06f066d4be5" containerID="916eb3462c426bff05c3868b91bd0c2f0f33e4ebdd77dd91f8983587590944cc" exitCode=2 Feb 16 11:34:24 crc kubenswrapper[4949]: I0216 11:34:24.099756 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"bbc90277-5d42-4c6c-a24c-d06f066d4be5","Type":"ContainerDied","Data":"916eb3462c426bff05c3868b91bd0c2f0f33e4ebdd77dd91f8983587590944cc"} Feb 16 11:34:24 crc kubenswrapper[4949]: I0216 11:34:24.165964 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=3.548257186 podStartE2EDuration="9.165940859s" podCreationTimestamp="2026-02-16 11:34:15 +0000 UTC" firstStartedPulling="2026-02-16 11:34:16.90552075 +0000 UTC m=+1646.534854915" lastFinishedPulling="2026-02-16 11:34:22.523204423 +0000 UTC m=+1652.152538588" observedRunningTime="2026-02-16 11:34:24.139264994 +0000 UTC m=+1653.768599159" watchObservedRunningTime="2026-02-16 11:34:24.165940859 +0000 UTC m=+1653.795275014" Feb 16 11:34:24 crc kubenswrapper[4949]: I0216 11:34:24.521403 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Feb 16 11:34:24 crc kubenswrapper[4949]: I0216 11:34:24.622079 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tm986\" (UniqueName: \"kubernetes.io/projected/f89f3c3c-f6cb-4d3e-8950-fe35b2bfcc41-kube-api-access-tm986\") pod \"f89f3c3c-f6cb-4d3e-8950-fe35b2bfcc41\" (UID: \"f89f3c3c-f6cb-4d3e-8950-fe35b2bfcc41\") " Feb 16 11:34:24 crc kubenswrapper[4949]: I0216 11:34:24.647135 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f89f3c3c-f6cb-4d3e-8950-fe35b2bfcc41-kube-api-access-tm986" (OuterVolumeSpecName: "kube-api-access-tm986") pod "f89f3c3c-f6cb-4d3e-8950-fe35b2bfcc41" (UID: "f89f3c3c-f6cb-4d3e-8950-fe35b2bfcc41"). InnerVolumeSpecName "kube-api-access-tm986". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:34:24 crc kubenswrapper[4949]: I0216 11:34:24.725332 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tm986\" (UniqueName: \"kubernetes.io/projected/f89f3c3c-f6cb-4d3e-8950-fe35b2bfcc41-kube-api-access-tm986\") on node \"crc\" DevicePath \"\"" Feb 16 11:34:24 crc kubenswrapper[4949]: I0216 11:34:24.728767 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-0" Feb 16 11:34:24 crc kubenswrapper[4949]: I0216 11:34:24.827236 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gk6gn\" (UniqueName: \"kubernetes.io/projected/bbc90277-5d42-4c6c-a24c-d06f066d4be5-kube-api-access-gk6gn\") pod \"bbc90277-5d42-4c6c-a24c-d06f066d4be5\" (UID: \"bbc90277-5d42-4c6c-a24c-d06f066d4be5\") " Feb 16 11:34:24 crc kubenswrapper[4949]: I0216 11:34:24.827354 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bbc90277-5d42-4c6c-a24c-d06f066d4be5-config-data\") pod \"bbc90277-5d42-4c6c-a24c-d06f066d4be5\" (UID: \"bbc90277-5d42-4c6c-a24c-d06f066d4be5\") " Feb 16 11:34:24 crc kubenswrapper[4949]: I0216 11:34:24.827423 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bbc90277-5d42-4c6c-a24c-d06f066d4be5-combined-ca-bundle\") pod \"bbc90277-5d42-4c6c-a24c-d06f066d4be5\" (UID: \"bbc90277-5d42-4c6c-a24c-d06f066d4be5\") " Feb 16 11:34:24 crc kubenswrapper[4949]: I0216 11:34:24.833185 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bbc90277-5d42-4c6c-a24c-d06f066d4be5-kube-api-access-gk6gn" (OuterVolumeSpecName: "kube-api-access-gk6gn") pod "bbc90277-5d42-4c6c-a24c-d06f066d4be5" (UID: "bbc90277-5d42-4c6c-a24c-d06f066d4be5"). InnerVolumeSpecName "kube-api-access-gk6gn". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:34:24 crc kubenswrapper[4949]: I0216 11:34:24.896384 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bbc90277-5d42-4c6c-a24c-d06f066d4be5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bbc90277-5d42-4c6c-a24c-d06f066d4be5" (UID: "bbc90277-5d42-4c6c-a24c-d06f066d4be5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:34:24 crc kubenswrapper[4949]: I0216 11:34:24.930889 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gk6gn\" (UniqueName: \"kubernetes.io/projected/bbc90277-5d42-4c6c-a24c-d06f066d4be5-kube-api-access-gk6gn\") on node \"crc\" DevicePath \"\"" Feb 16 11:34:24 crc kubenswrapper[4949]: I0216 11:34:24.931225 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bbc90277-5d42-4c6c-a24c-d06f066d4be5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:34:24 crc kubenswrapper[4949]: I0216 11:34:24.961193 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bbc90277-5d42-4c6c-a24c-d06f066d4be5-config-data" (OuterVolumeSpecName: "config-data") pod "bbc90277-5d42-4c6c-a24c-d06f066d4be5" (UID: "bbc90277-5d42-4c6c-a24c-d06f066d4be5"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.033521 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bbc90277-5d42-4c6c-a24c-d06f066d4be5-config-data\") on node \"crc\" DevicePath \"\"" Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.115324 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"f89f3c3c-f6cb-4d3e-8950-fe35b2bfcc41","Type":"ContainerDied","Data":"e1229e8f2bad278f06824aa65d08667f08e0fa7cb82fc7f8fbc50843c993cae4"} Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.115356 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.115417 4949 scope.go:117] "RemoveContainer" containerID="04cbb55a710e1af3150d6facd8083abe6253ef0aea8c224eef19eef55a9ab0ed" Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.117988 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"bbc90277-5d42-4c6c-a24c-d06f066d4be5","Type":"ContainerDied","Data":"1327864664aebf0fc878c2c636d5f78f234aec401f58df168c4a4dba7505c4ac"} Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.118111 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0" Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.153138 4949 scope.go:117] "RemoveContainer" containerID="916eb3462c426bff05c3868b91bd0c2f0f33e4ebdd77dd91f8983587590944cc" Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.173573 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-0"] Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.194657 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-0"] Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.228745 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.274299 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bbc90277-5d42-4c6c-a24c-d06f066d4be5" path="/var/lib/kubelet/pods/bbc90277-5d42-4c6c-a24c-d06f066d4be5/volumes" Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.275060 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.275099 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-0"] Feb 16 11:34:25 crc kubenswrapper[4949]: E0216 11:34:25.275550 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f89f3c3c-f6cb-4d3e-8950-fe35b2bfcc41" containerName="kube-state-metrics" Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.275573 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="f89f3c3c-f6cb-4d3e-8950-fe35b2bfcc41" containerName="kube-state-metrics" Feb 16 11:34:25 crc kubenswrapper[4949]: E0216 11:34:25.275614 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bbc90277-5d42-4c6c-a24c-d06f066d4be5" containerName="mysqld-exporter" Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.275632 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="bbc90277-5d42-4c6c-a24c-d06f066d4be5" containerName="mysqld-exporter" Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.275930 4949 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="bbc90277-5d42-4c6c-a24c-d06f066d4be5" containerName="mysqld-exporter" Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.275979 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="f89f3c3c-f6cb-4d3e-8950-fe35b2bfcc41" containerName="kube-state-metrics" Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.296423 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"] Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.296573 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0" Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.299131 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-mysqld-exporter-svc" Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.299036 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-config-data" Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.311963 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.340546 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.340840 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.343286 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.343564 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.346271 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ecc4a304-dde4-4f96-9b54-e0df21ac37c3-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"ecc4a304-dde4-4f96-9b54-e0df21ac37c3\") " pod="openstack/mysqld-exporter-0" Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.346309 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mysqld-exporter-tls-certs\" (UniqueName: \"kubernetes.io/secret/ecc4a304-dde4-4f96-9b54-e0df21ac37c3-mysqld-exporter-tls-certs\") pod \"mysqld-exporter-0\" (UID: \"ecc4a304-dde4-4f96-9b54-e0df21ac37c3\") " pod="openstack/mysqld-exporter-0" Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.346802 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f5qws\" (UniqueName: \"kubernetes.io/projected/ecc4a304-dde4-4f96-9b54-e0df21ac37c3-kube-api-access-f5qws\") pod \"mysqld-exporter-0\" (UID: \"ecc4a304-dde4-4f96-9b54-e0df21ac37c3\") " pod="openstack/mysqld-exporter-0" Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.347295 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ecc4a304-dde4-4f96-9b54-e0df21ac37c3-config-data\") pod \"mysqld-exporter-0\" (UID: \"ecc4a304-dde4-4f96-9b54-e0df21ac37c3\") " pod="openstack/mysqld-exporter-0" Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.449509 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-f5qws\" (UniqueName: \"kubernetes.io/projected/ecc4a304-dde4-4f96-9b54-e0df21ac37c3-kube-api-access-f5qws\") pod \"mysqld-exporter-0\" (UID: \"ecc4a304-dde4-4f96-9b54-e0df21ac37c3\") " pod="openstack/mysqld-exporter-0" Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.449716 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8vcrf\" (UniqueName: \"kubernetes.io/projected/4d408b50-de06-4fdc-a945-afcd98fb4775-kube-api-access-8vcrf\") pod \"kube-state-metrics-0\" (UID: \"4d408b50-de06-4fdc-a945-afcd98fb4775\") " pod="openstack/kube-state-metrics-0" Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.449932 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ecc4a304-dde4-4f96-9b54-e0df21ac37c3-config-data\") pod \"mysqld-exporter-0\" (UID: \"ecc4a304-dde4-4f96-9b54-e0df21ac37c3\") " pod="openstack/mysqld-exporter-0" Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.450078 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ecc4a304-dde4-4f96-9b54-e0df21ac37c3-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"ecc4a304-dde4-4f96-9b54-e0df21ac37c3\") " pod="openstack/mysqld-exporter-0" Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.450764 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mysqld-exporter-tls-certs\" (UniqueName: \"kubernetes.io/secret/ecc4a304-dde4-4f96-9b54-e0df21ac37c3-mysqld-exporter-tls-certs\") pod \"mysqld-exporter-0\" (UID: \"ecc4a304-dde4-4f96-9b54-e0df21ac37c3\") " pod="openstack/mysqld-exporter-0" Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.450822 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d408b50-de06-4fdc-a945-afcd98fb4775-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"4d408b50-de06-4fdc-a945-afcd98fb4775\") " pod="openstack/kube-state-metrics-0" Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.450849 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/4d408b50-de06-4fdc-a945-afcd98fb4775-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"4d408b50-de06-4fdc-a945-afcd98fb4775\") " pod="openstack/kube-state-metrics-0" Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.450899 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/4d408b50-de06-4fdc-a945-afcd98fb4775-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"4d408b50-de06-4fdc-a945-afcd98fb4775\") " pod="openstack/kube-state-metrics-0" Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.455156 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ecc4a304-dde4-4f96-9b54-e0df21ac37c3-config-data\") pod \"mysqld-exporter-0\" (UID: \"ecc4a304-dde4-4f96-9b54-e0df21ac37c3\") " pod="openstack/mysqld-exporter-0" Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.455264 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mysqld-exporter-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/ecc4a304-dde4-4f96-9b54-e0df21ac37c3-mysqld-exporter-tls-certs\") pod \"mysqld-exporter-0\" (UID: \"ecc4a304-dde4-4f96-9b54-e0df21ac37c3\") " pod="openstack/mysqld-exporter-0" Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.459164 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ecc4a304-dde4-4f96-9b54-e0df21ac37c3-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"ecc4a304-dde4-4f96-9b54-e0df21ac37c3\") " pod="openstack/mysqld-exporter-0" Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.474265 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f5qws\" (UniqueName: \"kubernetes.io/projected/ecc4a304-dde4-4f96-9b54-e0df21ac37c3-kube-api-access-f5qws\") pod \"mysqld-exporter-0\" (UID: \"ecc4a304-dde4-4f96-9b54-e0df21ac37c3\") " pod="openstack/mysqld-exporter-0" Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.554163 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d408b50-de06-4fdc-a945-afcd98fb4775-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"4d408b50-de06-4fdc-a945-afcd98fb4775\") " pod="openstack/kube-state-metrics-0" Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.554247 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/4d408b50-de06-4fdc-a945-afcd98fb4775-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"4d408b50-de06-4fdc-a945-afcd98fb4775\") " pod="openstack/kube-state-metrics-0" Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.554289 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/4d408b50-de06-4fdc-a945-afcd98fb4775-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"4d408b50-de06-4fdc-a945-afcd98fb4775\") " pod="openstack/kube-state-metrics-0" Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.554425 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8vcrf\" (UniqueName: \"kubernetes.io/projected/4d408b50-de06-4fdc-a945-afcd98fb4775-kube-api-access-8vcrf\") pod \"kube-state-metrics-0\" (UID: \"4d408b50-de06-4fdc-a945-afcd98fb4775\") " pod="openstack/kube-state-metrics-0" Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.558216 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d408b50-de06-4fdc-a945-afcd98fb4775-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"4d408b50-de06-4fdc-a945-afcd98fb4775\") " pod="openstack/kube-state-metrics-0" Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.558232 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/4d408b50-de06-4fdc-a945-afcd98fb4775-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"4d408b50-de06-4fdc-a945-afcd98fb4775\") " pod="openstack/kube-state-metrics-0" Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.561820 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/4d408b50-de06-4fdc-a945-afcd98fb4775-kube-state-metrics-tls-certs\") pod 
\"kube-state-metrics-0\" (UID: \"4d408b50-de06-4fdc-a945-afcd98fb4775\") " pod="openstack/kube-state-metrics-0" Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.570824 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8vcrf\" (UniqueName: \"kubernetes.io/projected/4d408b50-de06-4fdc-a945-afcd98fb4775-kube-api-access-8vcrf\") pod \"kube-state-metrics-0\" (UID: \"4d408b50-de06-4fdc-a945-afcd98fb4775\") " pod="openstack/kube-state-metrics-0" Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.644953 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0" Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.677833 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.799155 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.799682 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="28eebc18-c4e8-474f-bb20-aa5919f71f9e" containerName="ceilometer-central-agent" containerID="cri-o://266f9bd9cac1f8407dcb711849bc5e0e5c38e1701f427af2b28a30d6027a3a7c" gracePeriod=30 Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.800326 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="28eebc18-c4e8-474f-bb20-aa5919f71f9e" containerName="proxy-httpd" containerID="cri-o://121245bca1d05abad828d44d82f8dac7b0ec7c05c15b34a4aa340497b4266592" gracePeriod=30 Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.800630 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="28eebc18-c4e8-474f-bb20-aa5919f71f9e" containerName="sg-core" containerID="cri-o://6c8603501db783d4cf8635b42ee9d1963582d2e3c4d985fbbc4458755f9f4329" gracePeriod=30 Feb 16 11:34:25 crc kubenswrapper[4949]: I0216 11:34:25.800673 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="28eebc18-c4e8-474f-bb20-aa5919f71f9e" containerName="ceilometer-notification-agent" containerID="cri-o://d8c9637e1c7760cef2791464d9db3682e0c8c3eaac9b5efa94b61bf754ffd8b6" gracePeriod=30 Feb 16 11:34:26 crc kubenswrapper[4949]: I0216 11:34:26.140162 4949 generic.go:334] "Generic (PLEG): container finished" podID="28eebc18-c4e8-474f-bb20-aa5919f71f9e" containerID="121245bca1d05abad828d44d82f8dac7b0ec7c05c15b34a4aa340497b4266592" exitCode=0 Feb 16 11:34:26 crc kubenswrapper[4949]: I0216 11:34:26.140230 4949 generic.go:334] "Generic (PLEG): container finished" podID="28eebc18-c4e8-474f-bb20-aa5919f71f9e" containerID="6c8603501db783d4cf8635b42ee9d1963582d2e3c4d985fbbc4458755f9f4329" exitCode=2 Feb 16 11:34:26 crc kubenswrapper[4949]: I0216 11:34:26.140223 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"28eebc18-c4e8-474f-bb20-aa5919f71f9e","Type":"ContainerDied","Data":"121245bca1d05abad828d44d82f8dac7b0ec7c05c15b34a4aa340497b4266592"} Feb 16 11:34:26 crc kubenswrapper[4949]: I0216 11:34:26.140294 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"28eebc18-c4e8-474f-bb20-aa5919f71f9e","Type":"ContainerDied","Data":"6c8603501db783d4cf8635b42ee9d1963582d2e3c4d985fbbc4458755f9f4329"} Feb 16 11:34:26 crc kubenswrapper[4949]: W0216 11:34:26.269586 
4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podecc4a304_dde4_4f96_9b54_e0df21ac37c3.slice/crio-8d1776666ea93c425a5bf20bc8952944fc6031d97ec43e60c55091416cd9502c WatchSource:0}: Error finding container 8d1776666ea93c425a5bf20bc8952944fc6031d97ec43e60c55091416cd9502c: Status 404 returned error can't find the container with id 8d1776666ea93c425a5bf20bc8952944fc6031d97ec43e60c55091416cd9502c Feb 16 11:34:26 crc kubenswrapper[4949]: I0216 11:34:26.270560 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"] Feb 16 11:34:26 crc kubenswrapper[4949]: W0216 11:34:26.392556 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4d408b50_de06_4fdc_a945_afcd98fb4775.slice/crio-ebedef665c3c244e99b080667bc02d1a6f33e912e85016687110b2b706db01f7 WatchSource:0}: Error finding container ebedef665c3c244e99b080667bc02d1a6f33e912e85016687110b2b706db01f7: Status 404 returned error can't find the container with id ebedef665c3c244e99b080667bc02d1a6f33e912e85016687110b2b706db01f7 Feb 16 11:34:26 crc kubenswrapper[4949]: I0216 11:34:26.395027 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Feb 16 11:34:27 crc kubenswrapper[4949]: I0216 11:34:27.158974 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"4d408b50-de06-4fdc-a945-afcd98fb4775","Type":"ContainerStarted","Data":"ebedef665c3c244e99b080667bc02d1a6f33e912e85016687110b2b706db01f7"} Feb 16 11:34:27 crc kubenswrapper[4949]: I0216 11:34:27.164775 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"ecc4a304-dde4-4f96-9b54-e0df21ac37c3","Type":"ContainerStarted","Data":"8d1776666ea93c425a5bf20bc8952944fc6031d97ec43e60c55091416cd9502c"} Feb 16 11:34:27 crc kubenswrapper[4949]: I0216 11:34:27.175097 4949 generic.go:334] "Generic (PLEG): container finished" podID="28eebc18-c4e8-474f-bb20-aa5919f71f9e" containerID="266f9bd9cac1f8407dcb711849bc5e0e5c38e1701f427af2b28a30d6027a3a7c" exitCode=0 Feb 16 11:34:27 crc kubenswrapper[4949]: I0216 11:34:27.175151 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"28eebc18-c4e8-474f-bb20-aa5919f71f9e","Type":"ContainerDied","Data":"266f9bd9cac1f8407dcb711849bc5e0e5c38e1701f427af2b28a30d6027a3a7c"} Feb 16 11:34:27 crc kubenswrapper[4949]: I0216 11:34:27.247637 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f89f3c3c-f6cb-4d3e-8950-fe35b2bfcc41" path="/var/lib/kubelet/pods/f89f3c3c-f6cb-4d3e-8950-fe35b2bfcc41/volumes" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.145234 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.191697 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"4d408b50-de06-4fdc-a945-afcd98fb4775","Type":"ContainerStarted","Data":"7cf8b32ab7f2d46a67b5291978a9466f96e2ddc3d47ba534d63f6ed3036808c6"} Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.191775 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.195811 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"ecc4a304-dde4-4f96-9b54-e0df21ac37c3","Type":"ContainerStarted","Data":"c05d144fdc151fed780161dc2c71eaa418d4e46b18ae2c9350babab31c32d288"} Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.201720 4949 generic.go:334] "Generic (PLEG): container finished" podID="28eebc18-c4e8-474f-bb20-aa5919f71f9e" containerID="d8c9637e1c7760cef2791464d9db3682e0c8c3eaac9b5efa94b61bf754ffd8b6" exitCode=0 Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.201895 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"28eebc18-c4e8-474f-bb20-aa5919f71f9e","Type":"ContainerDied","Data":"d8c9637e1c7760cef2791464d9db3682e0c8c3eaac9b5efa94b61bf754ffd8b6"} Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.201974 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"28eebc18-c4e8-474f-bb20-aa5919f71f9e","Type":"ContainerDied","Data":"ae5975de579909110c90515b0da8a4ddd1622ce468e0b07a427a52aed1c6e2b6"} Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.202045 4949 scope.go:117] "RemoveContainer" containerID="121245bca1d05abad828d44d82f8dac7b0ec7c05c15b34a4aa340497b4266592" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.202291 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.245572 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.064702234 podStartE2EDuration="3.245544375s" podCreationTimestamp="2026-02-16 11:34:25 +0000 UTC" firstStartedPulling="2026-02-16 11:34:26.395564216 +0000 UTC m=+1656.024898381" lastFinishedPulling="2026-02-16 11:34:27.576406357 +0000 UTC m=+1657.205740522" observedRunningTime="2026-02-16 11:34:28.222252497 +0000 UTC m=+1657.851586662" watchObservedRunningTime="2026-02-16 11:34:28.245544375 +0000 UTC m=+1657.874878540" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.253019 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mysqld-exporter-0" podStartSLOduration=1.9042574829999999 podStartE2EDuration="3.253005549s" podCreationTimestamp="2026-02-16 11:34:25 +0000 UTC" firstStartedPulling="2026-02-16 11:34:26.272341512 +0000 UTC m=+1655.901675667" lastFinishedPulling="2026-02-16 11:34:27.621089548 +0000 UTC m=+1657.250423733" observedRunningTime="2026-02-16 11:34:28.240615014 +0000 UTC m=+1657.869949179" watchObservedRunningTime="2026-02-16 11:34:28.253005549 +0000 UTC m=+1657.882339714" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.254138 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/28eebc18-c4e8-474f-bb20-aa5919f71f9e-run-httpd\") pod \"28eebc18-c4e8-474f-bb20-aa5919f71f9e\" (UID: \"28eebc18-c4e8-474f-bb20-aa5919f71f9e\") " Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.254286 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/28eebc18-c4e8-474f-bb20-aa5919f71f9e-sg-core-conf-yaml\") pod \"28eebc18-c4e8-474f-bb20-aa5919f71f9e\" (UID: \"28eebc18-c4e8-474f-bb20-aa5919f71f9e\") " Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.254349 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zpjft\" (UniqueName: \"kubernetes.io/projected/28eebc18-c4e8-474f-bb20-aa5919f71f9e-kube-api-access-zpjft\") pod \"28eebc18-c4e8-474f-bb20-aa5919f71f9e\" (UID: \"28eebc18-c4e8-474f-bb20-aa5919f71f9e\") " Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.254386 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28eebc18-c4e8-474f-bb20-aa5919f71f9e-combined-ca-bundle\") pod \"28eebc18-c4e8-474f-bb20-aa5919f71f9e\" (UID: \"28eebc18-c4e8-474f-bb20-aa5919f71f9e\") " Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.254503 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/28eebc18-c4e8-474f-bb20-aa5919f71f9e-config-data\") pod \"28eebc18-c4e8-474f-bb20-aa5919f71f9e\" (UID: \"28eebc18-c4e8-474f-bb20-aa5919f71f9e\") " Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.254669 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/28eebc18-c4e8-474f-bb20-aa5919f71f9e-scripts\") pod \"28eebc18-c4e8-474f-bb20-aa5919f71f9e\" (UID: \"28eebc18-c4e8-474f-bb20-aa5919f71f9e\") " Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.254719 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/28eebc18-c4e8-474f-bb20-aa5919f71f9e-log-httpd\") pod \"28eebc18-c4e8-474f-bb20-aa5919f71f9e\" (UID: \"28eebc18-c4e8-474f-bb20-aa5919f71f9e\") " Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.257768 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/28eebc18-c4e8-474f-bb20-aa5919f71f9e-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "28eebc18-c4e8-474f-bb20-aa5919f71f9e" (UID: "28eebc18-c4e8-474f-bb20-aa5919f71f9e"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.258159 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/28eebc18-c4e8-474f-bb20-aa5919f71f9e-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "28eebc18-c4e8-474f-bb20-aa5919f71f9e" (UID: "28eebc18-c4e8-474f-bb20-aa5919f71f9e"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.270412 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/28eebc18-c4e8-474f-bb20-aa5919f71f9e-kube-api-access-zpjft" (OuterVolumeSpecName: "kube-api-access-zpjft") pod "28eebc18-c4e8-474f-bb20-aa5919f71f9e" (UID: "28eebc18-c4e8-474f-bb20-aa5919f71f9e"). InnerVolumeSpecName "kube-api-access-zpjft". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.292524 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/28eebc18-c4e8-474f-bb20-aa5919f71f9e-scripts" (OuterVolumeSpecName: "scripts") pod "28eebc18-c4e8-474f-bb20-aa5919f71f9e" (UID: "28eebc18-c4e8-474f-bb20-aa5919f71f9e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.292595 4949 scope.go:117] "RemoveContainer" containerID="6c8603501db783d4cf8635b42ee9d1963582d2e3c4d985fbbc4458755f9f4329" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.320463 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/28eebc18-c4e8-474f-bb20-aa5919f71f9e-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "28eebc18-c4e8-474f-bb20-aa5919f71f9e" (UID: "28eebc18-c4e8-474f-bb20-aa5919f71f9e"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.321342 4949 scope.go:117] "RemoveContainer" containerID="d8c9637e1c7760cef2791464d9db3682e0c8c3eaac9b5efa94b61bf754ffd8b6" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.360563 4949 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/28eebc18-c4e8-474f-bb20-aa5919f71f9e-scripts\") on node \"crc\" DevicePath \"\"" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.360591 4949 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/28eebc18-c4e8-474f-bb20-aa5919f71f9e-log-httpd\") on node \"crc\" DevicePath \"\"" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.360601 4949 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/28eebc18-c4e8-474f-bb20-aa5919f71f9e-run-httpd\") on node \"crc\" DevicePath \"\"" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.360609 4949 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/28eebc18-c4e8-474f-bb20-aa5919f71f9e-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.360620 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zpjft\" (UniqueName: \"kubernetes.io/projected/28eebc18-c4e8-474f-bb20-aa5919f71f9e-kube-api-access-zpjft\") on node \"crc\" DevicePath \"\"" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.384001 4949 scope.go:117] "RemoveContainer" containerID="266f9bd9cac1f8407dcb711849bc5e0e5c38e1701f427af2b28a30d6027a3a7c" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.420151 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/28eebc18-c4e8-474f-bb20-aa5919f71f9e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "28eebc18-c4e8-474f-bb20-aa5919f71f9e" (UID: "28eebc18-c4e8-474f-bb20-aa5919f71f9e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.426512 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/28eebc18-c4e8-474f-bb20-aa5919f71f9e-config-data" (OuterVolumeSpecName: "config-data") pod "28eebc18-c4e8-474f-bb20-aa5919f71f9e" (UID: "28eebc18-c4e8-474f-bb20-aa5919f71f9e"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.438502 4949 scope.go:117] "RemoveContainer" containerID="121245bca1d05abad828d44d82f8dac7b0ec7c05c15b34a4aa340497b4266592" Feb 16 11:34:28 crc kubenswrapper[4949]: E0216 11:34:28.439079 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"121245bca1d05abad828d44d82f8dac7b0ec7c05c15b34a4aa340497b4266592\": container with ID starting with 121245bca1d05abad828d44d82f8dac7b0ec7c05c15b34a4aa340497b4266592 not found: ID does not exist" containerID="121245bca1d05abad828d44d82f8dac7b0ec7c05c15b34a4aa340497b4266592" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.439158 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"121245bca1d05abad828d44d82f8dac7b0ec7c05c15b34a4aa340497b4266592"} err="failed to get container status \"121245bca1d05abad828d44d82f8dac7b0ec7c05c15b34a4aa340497b4266592\": rpc error: code = NotFound desc = could not find container \"121245bca1d05abad828d44d82f8dac7b0ec7c05c15b34a4aa340497b4266592\": container with ID starting with 121245bca1d05abad828d44d82f8dac7b0ec7c05c15b34a4aa340497b4266592 not found: ID does not exist" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.439215 4949 scope.go:117] "RemoveContainer" containerID="6c8603501db783d4cf8635b42ee9d1963582d2e3c4d985fbbc4458755f9f4329" Feb 16 11:34:28 crc kubenswrapper[4949]: E0216 11:34:28.439614 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6c8603501db783d4cf8635b42ee9d1963582d2e3c4d985fbbc4458755f9f4329\": container with ID starting with 6c8603501db783d4cf8635b42ee9d1963582d2e3c4d985fbbc4458755f9f4329 not found: ID does not exist" containerID="6c8603501db783d4cf8635b42ee9d1963582d2e3c4d985fbbc4458755f9f4329" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.439650 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6c8603501db783d4cf8635b42ee9d1963582d2e3c4d985fbbc4458755f9f4329"} err="failed to get container status \"6c8603501db783d4cf8635b42ee9d1963582d2e3c4d985fbbc4458755f9f4329\": rpc error: code = NotFound desc = could not find container \"6c8603501db783d4cf8635b42ee9d1963582d2e3c4d985fbbc4458755f9f4329\": container with ID starting with 6c8603501db783d4cf8635b42ee9d1963582d2e3c4d985fbbc4458755f9f4329 not found: ID does not exist" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.439670 4949 scope.go:117] "RemoveContainer" containerID="d8c9637e1c7760cef2791464d9db3682e0c8c3eaac9b5efa94b61bf754ffd8b6" Feb 16 11:34:28 crc kubenswrapper[4949]: E0216 11:34:28.439893 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d8c9637e1c7760cef2791464d9db3682e0c8c3eaac9b5efa94b61bf754ffd8b6\": container with ID starting with d8c9637e1c7760cef2791464d9db3682e0c8c3eaac9b5efa94b61bf754ffd8b6 not found: ID does not exist" containerID="d8c9637e1c7760cef2791464d9db3682e0c8c3eaac9b5efa94b61bf754ffd8b6" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.439929 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d8c9637e1c7760cef2791464d9db3682e0c8c3eaac9b5efa94b61bf754ffd8b6"} err="failed to get container status \"d8c9637e1c7760cef2791464d9db3682e0c8c3eaac9b5efa94b61bf754ffd8b6\": rpc error: code = NotFound desc = could not 
find container \"d8c9637e1c7760cef2791464d9db3682e0c8c3eaac9b5efa94b61bf754ffd8b6\": container with ID starting with d8c9637e1c7760cef2791464d9db3682e0c8c3eaac9b5efa94b61bf754ffd8b6 not found: ID does not exist" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.439949 4949 scope.go:117] "RemoveContainer" containerID="266f9bd9cac1f8407dcb711849bc5e0e5c38e1701f427af2b28a30d6027a3a7c" Feb 16 11:34:28 crc kubenswrapper[4949]: E0216 11:34:28.440198 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"266f9bd9cac1f8407dcb711849bc5e0e5c38e1701f427af2b28a30d6027a3a7c\": container with ID starting with 266f9bd9cac1f8407dcb711849bc5e0e5c38e1701f427af2b28a30d6027a3a7c not found: ID does not exist" containerID="266f9bd9cac1f8407dcb711849bc5e0e5c38e1701f427af2b28a30d6027a3a7c" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.440225 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"266f9bd9cac1f8407dcb711849bc5e0e5c38e1701f427af2b28a30d6027a3a7c"} err="failed to get container status \"266f9bd9cac1f8407dcb711849bc5e0e5c38e1701f427af2b28a30d6027a3a7c\": rpc error: code = NotFound desc = could not find container \"266f9bd9cac1f8407dcb711849bc5e0e5c38e1701f427af2b28a30d6027a3a7c\": container with ID starting with 266f9bd9cac1f8407dcb711849bc5e0e5c38e1701f427af2b28a30d6027a3a7c not found: ID does not exist" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.463054 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28eebc18-c4e8-474f-bb20-aa5919f71f9e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.463091 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/28eebc18-c4e8-474f-bb20-aa5919f71f9e-config-data\") on node \"crc\" DevicePath \"\"" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.487380 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.487480 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.488070 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.488124 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.494052 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.494480 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.601351 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.611697 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.613237 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.625427 4949 kubelet.go:2431] "SyncLoop REMOVE" 
source="api" pods=["openstack/ceilometer-0"] Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.643780 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.660410 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Feb 16 11:34:28 crc kubenswrapper[4949]: E0216 11:34:28.661117 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28eebc18-c4e8-474f-bb20-aa5919f71f9e" containerName="proxy-httpd" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.661145 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="28eebc18-c4e8-474f-bb20-aa5919f71f9e" containerName="proxy-httpd" Feb 16 11:34:28 crc kubenswrapper[4949]: E0216 11:34:28.661162 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28eebc18-c4e8-474f-bb20-aa5919f71f9e" containerName="ceilometer-central-agent" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.661189 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="28eebc18-c4e8-474f-bb20-aa5919f71f9e" containerName="ceilometer-central-agent" Feb 16 11:34:28 crc kubenswrapper[4949]: E0216 11:34:28.661219 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28eebc18-c4e8-474f-bb20-aa5919f71f9e" containerName="sg-core" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.661230 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="28eebc18-c4e8-474f-bb20-aa5919f71f9e" containerName="sg-core" Feb 16 11:34:28 crc kubenswrapper[4949]: E0216 11:34:28.661255 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28eebc18-c4e8-474f-bb20-aa5919f71f9e" containerName="ceilometer-notification-agent" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.661261 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="28eebc18-c4e8-474f-bb20-aa5919f71f9e" containerName="ceilometer-notification-agent" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.661490 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="28eebc18-c4e8-474f-bb20-aa5919f71f9e" containerName="ceilometer-central-agent" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.661513 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="28eebc18-c4e8-474f-bb20-aa5919f71f9e" containerName="sg-core" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.661525 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="28eebc18-c4e8-474f-bb20-aa5919f71f9e" containerName="ceilometer-notification-agent" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.661538 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="28eebc18-c4e8-474f-bb20-aa5919f71f9e" containerName="proxy-httpd" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.664364 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.667880 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.668079 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.670965 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.709814 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.772042 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/232596fc-0281-4c68-a196-169de41aee49-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"232596fc-0281-4c68-a196-169de41aee49\") " pod="openstack/ceilometer-0" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.772351 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jqwgp\" (UniqueName: \"kubernetes.io/projected/232596fc-0281-4c68-a196-169de41aee49-kube-api-access-jqwgp\") pod \"ceilometer-0\" (UID: \"232596fc-0281-4c68-a196-169de41aee49\") " pod="openstack/ceilometer-0" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.772465 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/232596fc-0281-4c68-a196-169de41aee49-log-httpd\") pod \"ceilometer-0\" (UID: \"232596fc-0281-4c68-a196-169de41aee49\") " pod="openstack/ceilometer-0" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.772543 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/232596fc-0281-4c68-a196-169de41aee49-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"232596fc-0281-4c68-a196-169de41aee49\") " pod="openstack/ceilometer-0" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.772612 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/232596fc-0281-4c68-a196-169de41aee49-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"232596fc-0281-4c68-a196-169de41aee49\") " pod="openstack/ceilometer-0" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.772949 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/232596fc-0281-4c68-a196-169de41aee49-run-httpd\") pod \"ceilometer-0\" (UID: \"232596fc-0281-4c68-a196-169de41aee49\") " pod="openstack/ceilometer-0" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.773442 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/232596fc-0281-4c68-a196-169de41aee49-config-data\") pod \"ceilometer-0\" (UID: \"232596fc-0281-4c68-a196-169de41aee49\") " pod="openstack/ceilometer-0" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.773557 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/232596fc-0281-4c68-a196-169de41aee49-scripts\") pod \"ceilometer-0\" (UID: \"232596fc-0281-4c68-a196-169de41aee49\") " pod="openstack/ceilometer-0" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.876651 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/232596fc-0281-4c68-a196-169de41aee49-config-data\") pod \"ceilometer-0\" (UID: \"232596fc-0281-4c68-a196-169de41aee49\") " pod="openstack/ceilometer-0" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.876730 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/232596fc-0281-4c68-a196-169de41aee49-scripts\") pod \"ceilometer-0\" (UID: \"232596fc-0281-4c68-a196-169de41aee49\") " pod="openstack/ceilometer-0" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.876798 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/232596fc-0281-4c68-a196-169de41aee49-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"232596fc-0281-4c68-a196-169de41aee49\") " pod="openstack/ceilometer-0" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.876854 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jqwgp\" (UniqueName: \"kubernetes.io/projected/232596fc-0281-4c68-a196-169de41aee49-kube-api-access-jqwgp\") pod \"ceilometer-0\" (UID: \"232596fc-0281-4c68-a196-169de41aee49\") " pod="openstack/ceilometer-0" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.876875 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/232596fc-0281-4c68-a196-169de41aee49-log-httpd\") pod \"ceilometer-0\" (UID: \"232596fc-0281-4c68-a196-169de41aee49\") " pod="openstack/ceilometer-0" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.876894 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/232596fc-0281-4c68-a196-169de41aee49-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"232596fc-0281-4c68-a196-169de41aee49\") " pod="openstack/ceilometer-0" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.876919 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/232596fc-0281-4c68-a196-169de41aee49-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"232596fc-0281-4c68-a196-169de41aee49\") " pod="openstack/ceilometer-0" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.876980 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/232596fc-0281-4c68-a196-169de41aee49-run-httpd\") pod \"ceilometer-0\" (UID: \"232596fc-0281-4c68-a196-169de41aee49\") " pod="openstack/ceilometer-0" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.877484 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/232596fc-0281-4c68-a196-169de41aee49-run-httpd\") pod \"ceilometer-0\" (UID: \"232596fc-0281-4c68-a196-169de41aee49\") " pod="openstack/ceilometer-0" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.877808 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/232596fc-0281-4c68-a196-169de41aee49-log-httpd\") pod \"ceilometer-0\" (UID: \"232596fc-0281-4c68-a196-169de41aee49\") " pod="openstack/ceilometer-0" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.882043 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/232596fc-0281-4c68-a196-169de41aee49-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"232596fc-0281-4c68-a196-169de41aee49\") " pod="openstack/ceilometer-0" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.884104 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/232596fc-0281-4c68-a196-169de41aee49-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"232596fc-0281-4c68-a196-169de41aee49\") " pod="openstack/ceilometer-0" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.884583 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/232596fc-0281-4c68-a196-169de41aee49-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"232596fc-0281-4c68-a196-169de41aee49\") " pod="openstack/ceilometer-0" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.884993 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/232596fc-0281-4c68-a196-169de41aee49-scripts\") pod \"ceilometer-0\" (UID: \"232596fc-0281-4c68-a196-169de41aee49\") " pod="openstack/ceilometer-0" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.899244 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jqwgp\" (UniqueName: \"kubernetes.io/projected/232596fc-0281-4c68-a196-169de41aee49-kube-api-access-jqwgp\") pod \"ceilometer-0\" (UID: \"232596fc-0281-4c68-a196-169de41aee49\") " pod="openstack/ceilometer-0" Feb 16 11:34:28 crc kubenswrapper[4949]: I0216 11:34:28.899842 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/232596fc-0281-4c68-a196-169de41aee49-config-data\") pod \"ceilometer-0\" (UID: \"232596fc-0281-4c68-a196-169de41aee49\") " pod="openstack/ceilometer-0" Feb 16 11:34:29 crc kubenswrapper[4949]: I0216 11:34:29.002036 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 16 11:34:29 crc kubenswrapper[4949]: I0216 11:34:29.264464 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="28eebc18-c4e8-474f-bb20-aa5919f71f9e" path="/var/lib/kubelet/pods/28eebc18-c4e8-474f-bb20-aa5919f71f9e/volumes" Feb 16 11:34:29 crc kubenswrapper[4949]: I0216 11:34:29.273506 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Feb 16 11:34:29 crc kubenswrapper[4949]: I0216 11:34:29.562790 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 16 11:34:29 crc kubenswrapper[4949]: W0216 11:34:29.562787 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod232596fc_0281_4c68_a196_169de41aee49.slice/crio-b74fb5ca8b2f327d0e4fa8c68f010221974bc1ae86f9f9a9b69d5f8ed601358e WatchSource:0}: Error finding container b74fb5ca8b2f327d0e4fa8c68f010221974bc1ae86f9f9a9b69d5f8ed601358e: Status 404 returned error can't find the container with id b74fb5ca8b2f327d0e4fa8c68f010221974bc1ae86f9f9a9b69d5f8ed601358e Feb 16 11:34:30 crc kubenswrapper[4949]: I0216 11:34:30.347594 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"232596fc-0281-4c68-a196-169de41aee49","Type":"ContainerStarted","Data":"b74fb5ca8b2f327d0e4fa8c68f010221974bc1ae86f9f9a9b69d5f8ed601358e"} Feb 16 11:34:31 crc kubenswrapper[4949]: I0216 11:34:31.363924 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"232596fc-0281-4c68-a196-169de41aee49","Type":"ContainerStarted","Data":"4f5987cfcee0d4eaf5bca066f66f2d2dedb3e4f7ee4ea5634e972dd0d04cb774"} Feb 16 11:34:32 crc kubenswrapper[4949]: I0216 11:34:32.393982 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"232596fc-0281-4c68-a196-169de41aee49","Type":"ContainerStarted","Data":"2d001d2a33a227777237ae83b5242609605a65737e02593781ebf3e648575fc3"} Feb 16 11:34:33 crc kubenswrapper[4949]: I0216 11:34:33.407833 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"232596fc-0281-4c68-a196-169de41aee49","Type":"ContainerStarted","Data":"ad72adbb2a85f5f8cca6c83d9583be005af4aa81f79d42acea2e5144a50cfa82"} Feb 16 11:34:34 crc kubenswrapper[4949]: I0216 11:34:34.445524 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"232596fc-0281-4c68-a196-169de41aee49","Type":"ContainerStarted","Data":"2a592931451d1d916cc868c5adb0cda14d9febbe3593b82f0f77a7787285225d"} Feb 16 11:34:34 crc kubenswrapper[4949]: I0216 11:34:34.446195 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Feb 16 11:34:34 crc kubenswrapper[4949]: I0216 11:34:34.490161 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.29760742 podStartE2EDuration="6.489940259s" podCreationTimestamp="2026-02-16 11:34:28 +0000 UTC" firstStartedPulling="2026-02-16 11:34:29.565701082 +0000 UTC m=+1659.195035247" lastFinishedPulling="2026-02-16 11:34:33.758033881 +0000 UTC m=+1663.387368086" observedRunningTime="2026-02-16 11:34:34.47811823 +0000 UTC m=+1664.107452395" watchObservedRunningTime="2026-02-16 11:34:34.489940259 +0000 UTC m=+1664.119274424" Feb 16 11:34:35 crc kubenswrapper[4949]: I0216 11:34:35.236507 4949 scope.go:117] "RemoveContainer" 
containerID="fcc25fdabb7245fb2b21fe157daaa0479c92c3c1e231adb091c93ee7fe6b8437" Feb 16 11:34:35 crc kubenswrapper[4949]: E0216 11:34:35.237082 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:34:35 crc kubenswrapper[4949]: I0216 11:34:35.688349 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Feb 16 11:34:46 crc kubenswrapper[4949]: I0216 11:34:46.236601 4949 scope.go:117] "RemoveContainer" containerID="fcc25fdabb7245fb2b21fe157daaa0479c92c3c1e231adb091c93ee7fe6b8437" Feb 16 11:34:46 crc kubenswrapper[4949]: E0216 11:34:46.239028 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:34:59 crc kubenswrapper[4949]: I0216 11:34:59.020104 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Feb 16 11:35:01 crc kubenswrapper[4949]: I0216 11:35:01.245361 4949 scope.go:117] "RemoveContainer" containerID="fcc25fdabb7245fb2b21fe157daaa0479c92c3c1e231adb091c93ee7fe6b8437" Feb 16 11:35:01 crc kubenswrapper[4949]: E0216 11:35:01.246105 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:35:10 crc kubenswrapper[4949]: I0216 11:35:10.371390 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-sync-ztzjs"] Feb 16 11:35:10 crc kubenswrapper[4949]: I0216 11:35:10.383431 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-sync-ztzjs"] Feb 16 11:35:10 crc kubenswrapper[4949]: I0216 11:35:10.471784 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-sync-5lgds"] Feb 16 11:35:10 crc kubenswrapper[4949]: I0216 11:35:10.473812 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-sync-5lgds" Feb 16 11:35:10 crc kubenswrapper[4949]: I0216 11:35:10.487699 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-5lgds"] Feb 16 11:35:10 crc kubenswrapper[4949]: I0216 11:35:10.577129 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da-config-data\") pod \"heat-db-sync-5lgds\" (UID: \"a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da\") " pod="openstack/heat-db-sync-5lgds" Feb 16 11:35:10 crc kubenswrapper[4949]: I0216 11:35:10.577228 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da-combined-ca-bundle\") pod \"heat-db-sync-5lgds\" (UID: \"a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da\") " pod="openstack/heat-db-sync-5lgds" Feb 16 11:35:10 crc kubenswrapper[4949]: I0216 11:35:10.577607 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ksbml\" (UniqueName: \"kubernetes.io/projected/a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da-kube-api-access-ksbml\") pod \"heat-db-sync-5lgds\" (UID: \"a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da\") " pod="openstack/heat-db-sync-5lgds" Feb 16 11:35:10 crc kubenswrapper[4949]: I0216 11:35:10.680851 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ksbml\" (UniqueName: \"kubernetes.io/projected/a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da-kube-api-access-ksbml\") pod \"heat-db-sync-5lgds\" (UID: \"a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da\") " pod="openstack/heat-db-sync-5lgds" Feb 16 11:35:10 crc kubenswrapper[4949]: I0216 11:35:10.681022 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da-config-data\") pod \"heat-db-sync-5lgds\" (UID: \"a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da\") " pod="openstack/heat-db-sync-5lgds" Feb 16 11:35:10 crc kubenswrapper[4949]: I0216 11:35:10.681079 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da-combined-ca-bundle\") pod \"heat-db-sync-5lgds\" (UID: \"a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da\") " pod="openstack/heat-db-sync-5lgds" Feb 16 11:35:10 crc kubenswrapper[4949]: I0216 11:35:10.689590 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da-combined-ca-bundle\") pod \"heat-db-sync-5lgds\" (UID: \"a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da\") " pod="openstack/heat-db-sync-5lgds" Feb 16 11:35:10 crc kubenswrapper[4949]: I0216 11:35:10.689660 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da-config-data\") pod \"heat-db-sync-5lgds\" (UID: \"a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da\") " pod="openstack/heat-db-sync-5lgds" Feb 16 11:35:10 crc kubenswrapper[4949]: I0216 11:35:10.700132 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ksbml\" (UniqueName: \"kubernetes.io/projected/a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da-kube-api-access-ksbml\") pod \"heat-db-sync-5lgds\" (UID: 
\"a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da\") " pod="openstack/heat-db-sync-5lgds" Feb 16 11:35:10 crc kubenswrapper[4949]: I0216 11:35:10.805323 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-5lgds" Feb 16 11:35:11 crc kubenswrapper[4949]: I0216 11:35:11.252115 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="667080fb-b428-4b48-87c9-a955ff09771a" path="/var/lib/kubelet/pods/667080fb-b428-4b48-87c9-a955ff09771a/volumes" Feb 16 11:35:11 crc kubenswrapper[4949]: I0216 11:35:11.382252 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-5lgds"] Feb 16 11:35:11 crc kubenswrapper[4949]: E0216 11:35:11.533382 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 16 11:35:11 crc kubenswrapper[4949]: E0216 11:35:11.533451 4949 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 16 11:35:11 crc kubenswrapper[4949]: E0216 11:35:11.533611 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ksbml,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL 
Feb 16 11:35:11 crc kubenswrapper[4949]: E0216 11:35:11.534959 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 11:35:11 crc kubenswrapper[4949]: I0216 11:35:11.885185 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-5lgds" event={"ID":"a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da","Type":"ContainerStarted","Data":"5c7477abbf7400bf455de210f60ca8feb5e8bcd44a851e8d6e69342956d234dc"}
Feb 16 11:35:11 crc kubenswrapper[4949]: E0216 11:35:11.888345 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 11:35:12 crc kubenswrapper[4949]: I0216 11:35:12.780478 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-2"]
Feb 16 11:35:12 crc kubenswrapper[4949]: I0216 11:35:12.815849 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Feb 16 11:35:12 crc kubenswrapper[4949]: I0216 11:35:12.816154 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="232596fc-0281-4c68-a196-169de41aee49" containerName="ceilometer-central-agent" containerID="cri-o://4f5987cfcee0d4eaf5bca066f66f2d2dedb3e4f7ee4ea5634e972dd0d04cb774" gracePeriod=30
Feb 16 11:35:12 crc kubenswrapper[4949]: I0216 11:35:12.816684 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="232596fc-0281-4c68-a196-169de41aee49" containerName="proxy-httpd" containerID="cri-o://2a592931451d1d916cc868c5adb0cda14d9febbe3593b82f0f77a7787285225d" gracePeriod=30
Feb 16 11:35:12 crc kubenswrapper[4949]: I0216 11:35:12.816732 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="232596fc-0281-4c68-a196-169de41aee49" containerName="sg-core" containerID="cri-o://ad72adbb2a85f5f8cca6c83d9583be005af4aa81f79d42acea2e5144a50cfa82" gracePeriod=30
Feb 16 11:35:12 crc kubenswrapper[4949]: I0216 11:35:12.816764 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="232596fc-0281-4c68-a196-169de41aee49" containerName="ceilometer-notification-agent" containerID="cri-o://2d001d2a33a227777237ae83b5242609605a65737e02593781ebf3e648575fc3" gracePeriod=30
Feb 16 11:35:12 crc kubenswrapper[4949]: E0216 11:35:12.907402 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 11:35:13 crc kubenswrapper[4949]: I0216 11:35:13.882315 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Feb 16 11:35:13 crc kubenswrapper[4949]: I0216 11:35:13.940759 4949 generic.go:334] "Generic (PLEG): container finished" podID="232596fc-0281-4c68-a196-169de41aee49" containerID="2a592931451d1d916cc868c5adb0cda14d9febbe3593b82f0f77a7787285225d" exitCode=0
Feb 16 11:35:13 crc kubenswrapper[4949]: I0216 11:35:13.940796 4949 generic.go:334] "Generic (PLEG): container finished" podID="232596fc-0281-4c68-a196-169de41aee49" containerID="ad72adbb2a85f5f8cca6c83d9583be005af4aa81f79d42acea2e5144a50cfa82" exitCode=2
Feb 16 11:35:13 crc kubenswrapper[4949]: I0216 11:35:13.940809 4949 generic.go:334] "Generic (PLEG): container finished" podID="232596fc-0281-4c68-a196-169de41aee49" containerID="4f5987cfcee0d4eaf5bca066f66f2d2dedb3e4f7ee4ea5634e972dd0d04cb774" exitCode=0
Feb 16 11:35:13 crc kubenswrapper[4949]: I0216 11:35:13.940839 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"232596fc-0281-4c68-a196-169de41aee49","Type":"ContainerDied","Data":"2a592931451d1d916cc868c5adb0cda14d9febbe3593b82f0f77a7787285225d"}
Feb 16 11:35:13 crc kubenswrapper[4949]: I0216 11:35:13.940874 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"232596fc-0281-4c68-a196-169de41aee49","Type":"ContainerDied","Data":"ad72adbb2a85f5f8cca6c83d9583be005af4aa81f79d42acea2e5144a50cfa82"}
Feb 16 11:35:13 crc kubenswrapper[4949]: I0216 11:35:13.940904 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"232596fc-0281-4c68-a196-169de41aee49","Type":"ContainerDied","Data":"4f5987cfcee0d4eaf5bca066f66f2d2dedb3e4f7ee4ea5634e972dd0d04cb774"}
Feb 16 11:35:15 crc kubenswrapper[4949]: I0216 11:35:15.235692 4949 scope.go:117] "RemoveContainer" containerID="fcc25fdabb7245fb2b21fe157daaa0479c92c3c1e231adb091c93ee7fe6b8437"
Feb 16 11:35:15 crc kubenswrapper[4949]: E0216 11:35:15.236590 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b"
Feb 16 11:35:15 crc kubenswrapper[4949]: I0216 11:35:15.963363 4949 generic.go:334] "Generic (PLEG): container finished" podID="232596fc-0281-4c68-a196-169de41aee49" containerID="2d001d2a33a227777237ae83b5242609605a65737e02593781ebf3e648575fc3" exitCode=0
Feb 16 11:35:15 crc kubenswrapper[4949]: I0216 11:35:15.963451 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"232596fc-0281-4c68-a196-169de41aee49","Type":"ContainerDied","Data":"2d001d2a33a227777237ae83b5242609605a65737e02593781ebf3e648575fc3"}
pod="openstack/ceilometer-0" event={"ID":"232596fc-0281-4c68-a196-169de41aee49","Type":"ContainerDied","Data":"2d001d2a33a227777237ae83b5242609605a65737e02593781ebf3e648575fc3"} Feb 16 11:35:15 crc kubenswrapper[4949]: I0216 11:35:15.963660 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"232596fc-0281-4c68-a196-169de41aee49","Type":"ContainerDied","Data":"b74fb5ca8b2f327d0e4fa8c68f010221974bc1ae86f9f9a9b69d5f8ed601358e"} Feb 16 11:35:15 crc kubenswrapper[4949]: I0216 11:35:15.963677 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b74fb5ca8b2f327d0e4fa8c68f010221974bc1ae86f9f9a9b69d5f8ed601358e" Feb 16 11:35:16 crc kubenswrapper[4949]: I0216 11:35:16.044723 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 16 11:35:16 crc kubenswrapper[4949]: I0216 11:35:16.136124 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/232596fc-0281-4c68-a196-169de41aee49-scripts\") pod \"232596fc-0281-4c68-a196-169de41aee49\" (UID: \"232596fc-0281-4c68-a196-169de41aee49\") " Feb 16 11:35:16 crc kubenswrapper[4949]: I0216 11:35:16.136204 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/232596fc-0281-4c68-a196-169de41aee49-combined-ca-bundle\") pod \"232596fc-0281-4c68-a196-169de41aee49\" (UID: \"232596fc-0281-4c68-a196-169de41aee49\") " Feb 16 11:35:16 crc kubenswrapper[4949]: I0216 11:35:16.136291 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/232596fc-0281-4c68-a196-169de41aee49-run-httpd\") pod \"232596fc-0281-4c68-a196-169de41aee49\" (UID: \"232596fc-0281-4c68-a196-169de41aee49\") " Feb 16 11:35:16 crc kubenswrapper[4949]: I0216 11:35:16.136478 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/232596fc-0281-4c68-a196-169de41aee49-sg-core-conf-yaml\") pod \"232596fc-0281-4c68-a196-169de41aee49\" (UID: \"232596fc-0281-4c68-a196-169de41aee49\") " Feb 16 11:35:16 crc kubenswrapper[4949]: I0216 11:35:16.136504 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/232596fc-0281-4c68-a196-169de41aee49-config-data\") pod \"232596fc-0281-4c68-a196-169de41aee49\" (UID: \"232596fc-0281-4c68-a196-169de41aee49\") " Feb 16 11:35:16 crc kubenswrapper[4949]: I0216 11:35:16.136565 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jqwgp\" (UniqueName: \"kubernetes.io/projected/232596fc-0281-4c68-a196-169de41aee49-kube-api-access-jqwgp\") pod \"232596fc-0281-4c68-a196-169de41aee49\" (UID: \"232596fc-0281-4c68-a196-169de41aee49\") " Feb 16 11:35:16 crc kubenswrapper[4949]: I0216 11:35:16.136652 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/232596fc-0281-4c68-a196-169de41aee49-ceilometer-tls-certs\") pod \"232596fc-0281-4c68-a196-169de41aee49\" (UID: \"232596fc-0281-4c68-a196-169de41aee49\") " Feb 16 11:35:16 crc kubenswrapper[4949]: I0216 11:35:16.136718 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/232596fc-0281-4c68-a196-169de41aee49-log-httpd\") pod \"232596fc-0281-4c68-a196-169de41aee49\" (UID: \"232596fc-0281-4c68-a196-169de41aee49\") " Feb 16 11:35:16 crc kubenswrapper[4949]: I0216 11:35:16.137136 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/232596fc-0281-4c68-a196-169de41aee49-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "232596fc-0281-4c68-a196-169de41aee49" (UID: "232596fc-0281-4c68-a196-169de41aee49"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:35:16 crc kubenswrapper[4949]: I0216 11:35:16.137206 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/232596fc-0281-4c68-a196-169de41aee49-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "232596fc-0281-4c68-a196-169de41aee49" (UID: "232596fc-0281-4c68-a196-169de41aee49"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:35:16 crc kubenswrapper[4949]: I0216 11:35:16.138184 4949 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/232596fc-0281-4c68-a196-169de41aee49-log-httpd\") on node \"crc\" DevicePath \"\"" Feb 16 11:35:16 crc kubenswrapper[4949]: I0216 11:35:16.138204 4949 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/232596fc-0281-4c68-a196-169de41aee49-run-httpd\") on node \"crc\" DevicePath \"\"" Feb 16 11:35:16 crc kubenswrapper[4949]: I0216 11:35:16.157507 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/232596fc-0281-4c68-a196-169de41aee49-kube-api-access-jqwgp" (OuterVolumeSpecName: "kube-api-access-jqwgp") pod "232596fc-0281-4c68-a196-169de41aee49" (UID: "232596fc-0281-4c68-a196-169de41aee49"). InnerVolumeSpecName "kube-api-access-jqwgp". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:35:16 crc kubenswrapper[4949]: I0216 11:35:16.186455 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/232596fc-0281-4c68-a196-169de41aee49-scripts" (OuterVolumeSpecName: "scripts") pod "232596fc-0281-4c68-a196-169de41aee49" (UID: "232596fc-0281-4c68-a196-169de41aee49"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:35:16 crc kubenswrapper[4949]: I0216 11:35:16.240440 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jqwgp\" (UniqueName: \"kubernetes.io/projected/232596fc-0281-4c68-a196-169de41aee49-kube-api-access-jqwgp\") on node \"crc\" DevicePath \"\"" Feb 16 11:35:16 crc kubenswrapper[4949]: I0216 11:35:16.240630 4949 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/232596fc-0281-4c68-a196-169de41aee49-scripts\") on node \"crc\" DevicePath \"\"" Feb 16 11:35:16 crc kubenswrapper[4949]: I0216 11:35:16.241404 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/232596fc-0281-4c68-a196-169de41aee49-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "232596fc-0281-4c68-a196-169de41aee49" (UID: "232596fc-0281-4c68-a196-169de41aee49"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:35:16 crc kubenswrapper[4949]: I0216 11:35:16.292527 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/232596fc-0281-4c68-a196-169de41aee49-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "232596fc-0281-4c68-a196-169de41aee49" (UID: "232596fc-0281-4c68-a196-169de41aee49"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:35:16 crc kubenswrapper[4949]: I0216 11:35:16.312316 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/232596fc-0281-4c68-a196-169de41aee49-config-data" (OuterVolumeSpecName: "config-data") pod "232596fc-0281-4c68-a196-169de41aee49" (UID: "232596fc-0281-4c68-a196-169de41aee49"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:35:16 crc kubenswrapper[4949]: I0216 11:35:16.314613 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/232596fc-0281-4c68-a196-169de41aee49-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "232596fc-0281-4c68-a196-169de41aee49" (UID: "232596fc-0281-4c68-a196-169de41aee49"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:35:16 crc kubenswrapper[4949]: I0216 11:35:16.343571 4949 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/232596fc-0281-4c68-a196-169de41aee49-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Feb 16 11:35:16 crc kubenswrapper[4949]: I0216 11:35:16.343608 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/232596fc-0281-4c68-a196-169de41aee49-config-data\") on node \"crc\" DevicePath \"\"" Feb 16 11:35:16 crc kubenswrapper[4949]: I0216 11:35:16.343617 4949 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/232596fc-0281-4c68-a196-169de41aee49-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 16 11:35:16 crc kubenswrapper[4949]: I0216 11:35:16.343627 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/232596fc-0281-4c68-a196-169de41aee49-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:35:16 crc kubenswrapper[4949]: I0216 11:35:16.976458 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 16 11:35:17 crc kubenswrapper[4949]: I0216 11:35:17.027989 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 16 11:35:17 crc kubenswrapper[4949]: I0216 11:35:17.047632 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Feb 16 11:35:17 crc kubenswrapper[4949]: I0216 11:35:17.062940 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Feb 16 11:35:17 crc kubenswrapper[4949]: E0216 11:35:17.063570 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="232596fc-0281-4c68-a196-169de41aee49" containerName="sg-core" Feb 16 11:35:17 crc kubenswrapper[4949]: I0216 11:35:17.063619 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="232596fc-0281-4c68-a196-169de41aee49" containerName="sg-core" Feb 16 11:35:17 crc kubenswrapper[4949]: E0216 11:35:17.063676 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="232596fc-0281-4c68-a196-169de41aee49" containerName="proxy-httpd" Feb 16 11:35:17 crc kubenswrapper[4949]: I0216 11:35:17.063686 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="232596fc-0281-4c68-a196-169de41aee49" containerName="proxy-httpd" Feb 16 11:35:17 crc kubenswrapper[4949]: E0216 11:35:17.063714 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="232596fc-0281-4c68-a196-169de41aee49" containerName="ceilometer-central-agent" Feb 16 11:35:17 crc kubenswrapper[4949]: I0216 11:35:17.063724 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="232596fc-0281-4c68-a196-169de41aee49" containerName="ceilometer-central-agent" Feb 16 11:35:17 crc kubenswrapper[4949]: E0216 11:35:17.063740 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="232596fc-0281-4c68-a196-169de41aee49" containerName="ceilometer-notification-agent" Feb 16 11:35:17 crc kubenswrapper[4949]: I0216 11:35:17.063751 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="232596fc-0281-4c68-a196-169de41aee49" containerName="ceilometer-notification-agent" Feb 16 11:35:17 crc kubenswrapper[4949]: I0216 11:35:17.064034 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="232596fc-0281-4c68-a196-169de41aee49" containerName="ceilometer-central-agent" Feb 16 11:35:17 crc kubenswrapper[4949]: I0216 11:35:17.064065 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="232596fc-0281-4c68-a196-169de41aee49" containerName="proxy-httpd" Feb 16 11:35:17 crc kubenswrapper[4949]: I0216 11:35:17.064099 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="232596fc-0281-4c68-a196-169de41aee49" containerName="sg-core" Feb 16 11:35:17 crc kubenswrapper[4949]: I0216 11:35:17.064113 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="232596fc-0281-4c68-a196-169de41aee49" containerName="ceilometer-notification-agent" Feb 16 11:35:17 crc kubenswrapper[4949]: I0216 11:35:17.084398 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 16 11:35:17 crc kubenswrapper[4949]: I0216 11:35:17.084539 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 16 11:35:17 crc kubenswrapper[4949]: I0216 11:35:17.089194 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Feb 16 11:35:17 crc kubenswrapper[4949]: I0216 11:35:17.089585 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Feb 16 11:35:17 crc kubenswrapper[4949]: I0216 11:35:17.089754 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Feb 16 11:35:17 crc kubenswrapper[4949]: I0216 11:35:17.164451 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c69d7379-6f2b-45ae-8972-71e223a337a8-run-httpd\") pod \"ceilometer-0\" (UID: \"c69d7379-6f2b-45ae-8972-71e223a337a8\") " pod="openstack/ceilometer-0" Feb 16 11:35:17 crc kubenswrapper[4949]: I0216 11:35:17.164512 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c69d7379-6f2b-45ae-8972-71e223a337a8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c69d7379-6f2b-45ae-8972-71e223a337a8\") " pod="openstack/ceilometer-0" Feb 16 11:35:17 crc kubenswrapper[4949]: I0216 11:35:17.164655 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c69d7379-6f2b-45ae-8972-71e223a337a8-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c69d7379-6f2b-45ae-8972-71e223a337a8\") " pod="openstack/ceilometer-0" Feb 16 11:35:17 crc kubenswrapper[4949]: I0216 11:35:17.164755 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c69d7379-6f2b-45ae-8972-71e223a337a8-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"c69d7379-6f2b-45ae-8972-71e223a337a8\") " pod="openstack/ceilometer-0" Feb 16 11:35:17 crc kubenswrapper[4949]: I0216 11:35:17.164933 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c69d7379-6f2b-45ae-8972-71e223a337a8-log-httpd\") pod \"ceilometer-0\" (UID: \"c69d7379-6f2b-45ae-8972-71e223a337a8\") " pod="openstack/ceilometer-0" Feb 16 11:35:17 crc kubenswrapper[4949]: I0216 11:35:17.165057 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c69d7379-6f2b-45ae-8972-71e223a337a8-scripts\") pod \"ceilometer-0\" (UID: \"c69d7379-6f2b-45ae-8972-71e223a337a8\") " pod="openstack/ceilometer-0" Feb 16 11:35:17 crc kubenswrapper[4949]: I0216 11:35:17.165115 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8k7p7\" (UniqueName: \"kubernetes.io/projected/c69d7379-6f2b-45ae-8972-71e223a337a8-kube-api-access-8k7p7\") pod \"ceilometer-0\" (UID: \"c69d7379-6f2b-45ae-8972-71e223a337a8\") " pod="openstack/ceilometer-0" Feb 16 11:35:17 crc kubenswrapper[4949]: I0216 11:35:17.165163 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c69d7379-6f2b-45ae-8972-71e223a337a8-config-data\") pod \"ceilometer-0\" (UID: \"c69d7379-6f2b-45ae-8972-71e223a337a8\") " pod="openstack/ceilometer-0" Feb 
Feb 16 11:35:17 crc kubenswrapper[4949]: I0216 11:35:17.268697 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c69d7379-6f2b-45ae-8972-71e223a337a8-scripts\") pod \"ceilometer-0\" (UID: \"c69d7379-6f2b-45ae-8972-71e223a337a8\") " pod="openstack/ceilometer-0"
Feb 16 11:35:17 crc kubenswrapper[4949]: I0216 11:35:17.268793 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8k7p7\" (UniqueName: \"kubernetes.io/projected/c69d7379-6f2b-45ae-8972-71e223a337a8-kube-api-access-8k7p7\") pod \"ceilometer-0\" (UID: \"c69d7379-6f2b-45ae-8972-71e223a337a8\") " pod="openstack/ceilometer-0"
Feb 16 11:35:17 crc kubenswrapper[4949]: I0216 11:35:17.268856 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c69d7379-6f2b-45ae-8972-71e223a337a8-config-data\") pod \"ceilometer-0\" (UID: \"c69d7379-6f2b-45ae-8972-71e223a337a8\") " pod="openstack/ceilometer-0"
Feb 16 11:35:17 crc kubenswrapper[4949]: I0216 11:35:17.269040 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c69d7379-6f2b-45ae-8972-71e223a337a8-run-httpd\") pod \"ceilometer-0\" (UID: \"c69d7379-6f2b-45ae-8972-71e223a337a8\") " pod="openstack/ceilometer-0"
Feb 16 11:35:17 crc kubenswrapper[4949]: I0216 11:35:17.269072 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c69d7379-6f2b-45ae-8972-71e223a337a8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c69d7379-6f2b-45ae-8972-71e223a337a8\") " pod="openstack/ceilometer-0"
Feb 16 11:35:17 crc kubenswrapper[4949]: I0216 11:35:17.269137 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c69d7379-6f2b-45ae-8972-71e223a337a8-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c69d7379-6f2b-45ae-8972-71e223a337a8\") " pod="openstack/ceilometer-0"
Feb 16 11:35:17 crc kubenswrapper[4949]: I0216 11:35:17.269164 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c69d7379-6f2b-45ae-8972-71e223a337a8-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"c69d7379-6f2b-45ae-8972-71e223a337a8\") " pod="openstack/ceilometer-0"
Feb 16 11:35:17 crc kubenswrapper[4949]: I0216 11:35:17.269285 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c69d7379-6f2b-45ae-8972-71e223a337a8-log-httpd\") pod \"ceilometer-0\" (UID: \"c69d7379-6f2b-45ae-8972-71e223a337a8\") " pod="openstack/ceilometer-0"
Feb 16 11:35:17 crc kubenswrapper[4949]: I0216 11:35:17.270910 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c69d7379-6f2b-45ae-8972-71e223a337a8-run-httpd\") pod \"ceilometer-0\" (UID: \"c69d7379-6f2b-45ae-8972-71e223a337a8\") " pod="openstack/ceilometer-0"
Feb 16 11:35:17 crc kubenswrapper[4949]: I0216 11:35:17.271335 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c69d7379-6f2b-45ae-8972-71e223a337a8-log-httpd\") pod \"ceilometer-0\" (UID: \"c69d7379-6f2b-45ae-8972-71e223a337a8\") " pod="openstack/ceilometer-0"
volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c69d7379-6f2b-45ae-8972-71e223a337a8-log-httpd\") pod \"ceilometer-0\" (UID: \"c69d7379-6f2b-45ae-8972-71e223a337a8\") " pod="openstack/ceilometer-0" Feb 16 11:35:17 crc kubenswrapper[4949]: I0216 11:35:17.273822 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c69d7379-6f2b-45ae-8972-71e223a337a8-scripts\") pod \"ceilometer-0\" (UID: \"c69d7379-6f2b-45ae-8972-71e223a337a8\") " pod="openstack/ceilometer-0" Feb 16 11:35:17 crc kubenswrapper[4949]: I0216 11:35:17.274376 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c69d7379-6f2b-45ae-8972-71e223a337a8-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c69d7379-6f2b-45ae-8972-71e223a337a8\") " pod="openstack/ceilometer-0" Feb 16 11:35:17 crc kubenswrapper[4949]: I0216 11:35:17.274858 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c69d7379-6f2b-45ae-8972-71e223a337a8-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"c69d7379-6f2b-45ae-8972-71e223a337a8\") " pod="openstack/ceilometer-0" Feb 16 11:35:17 crc kubenswrapper[4949]: I0216 11:35:17.275001 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c69d7379-6f2b-45ae-8972-71e223a337a8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c69d7379-6f2b-45ae-8972-71e223a337a8\") " pod="openstack/ceilometer-0" Feb 16 11:35:17 crc kubenswrapper[4949]: I0216 11:35:17.285450 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c69d7379-6f2b-45ae-8972-71e223a337a8-config-data\") pod \"ceilometer-0\" (UID: \"c69d7379-6f2b-45ae-8972-71e223a337a8\") " pod="openstack/ceilometer-0" Feb 16 11:35:17 crc kubenswrapper[4949]: I0216 11:35:17.289938 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8k7p7\" (UniqueName: \"kubernetes.io/projected/c69d7379-6f2b-45ae-8972-71e223a337a8-kube-api-access-8k7p7\") pod \"ceilometer-0\" (UID: \"c69d7379-6f2b-45ae-8972-71e223a337a8\") " pod="openstack/ceilometer-0" Feb 16 11:35:17 crc kubenswrapper[4949]: I0216 11:35:17.406231 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 16 11:35:18 crc kubenswrapper[4949]: I0216 11:35:18.015240 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 16 11:35:18 crc kubenswrapper[4949]: E0216 11:35:18.142516 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 16 11:35:18 crc kubenswrapper[4949]: E0216 11:35:18.142808 4949 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 16 11:35:18 crc kubenswrapper[4949]: E0216 11:35:18.143001 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n674h5dh7bh65bhcch65chc4h547h5d4h5c7h5dch5c8h74hb9h5f4hd8h79h7h59bh559h56bh9bhbch67bh68bh575h5cbh658h5bch7bhcch5d9q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8k7p7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(c69d7379-6f2b-45ae-8972-71e223a337a8): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" logger="UnhandledError" Feb 16 11:35:18 crc kubenswrapper[4949]: I0216 11:35:18.731569 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-2" podUID="4579a2eb-f9a3-4d32-b67a-d76de7f6a97a" containerName="rabbitmq" containerID="cri-o://49365de1663135a492ebbe8a93c51b675de302485b8ad97f462bc2835be75d4b" gracePeriod=604795 Feb 16 11:35:18 crc kubenswrapper[4949]: I0216 11:35:18.802791 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="0f715146-edc4-4f1f-80e3-f134d9833f47" containerName="rabbitmq" containerID="cri-o://2bd451c81f0ec8f9fb47a11286a0d4725f67f1e8cea74f5539242bca2957d56c" gracePeriod=604796 Feb 16 11:35:19 crc kubenswrapper[4949]: I0216 11:35:19.003477 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c69d7379-6f2b-45ae-8972-71e223a337a8","Type":"ContainerStarted","Data":"cb0d6559c207719b75e27fa2da485b65e58957182fea7b6686e62af3bb1a61ec"} Feb 16 11:35:19 crc kubenswrapper[4949]: I0216 11:35:19.003535 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c69d7379-6f2b-45ae-8972-71e223a337a8","Type":"ContainerStarted","Data":"b8809b402da1a9ff00bfaa564b720ff96b869106339c27ab2cdc01a4f61674ef"} Feb 16 11:35:20 crc kubenswrapper[4949]: I0216 11:35:20.018190 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c69d7379-6f2b-45ae-8972-71e223a337a8","Type":"ContainerStarted","Data":"55a23aaabacb382758642f427d3ceb3ee9f0a7536913f22659ce2b352fccfee5"} Feb 16 11:35:21 crc kubenswrapper[4949]: I0216 11:35:21.019250 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-2" podUID="4579a2eb-f9a3-4d32-b67a-d76de7f6a97a" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.130:5671: connect: connection refused" Feb 16 11:35:21 crc kubenswrapper[4949]: I0216 11:35:21.116139 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="0f715146-edc4-4f1f-80e3-f134d9833f47" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.131:5671: connect: connection refused" Feb 16 11:35:21 crc kubenswrapper[4949]: E0216 11:35:21.164854 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:35:22 crc kubenswrapper[4949]: I0216 11:35:22.042105 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c69d7379-6f2b-45ae-8972-71e223a337a8","Type":"ContainerStarted","Data":"b1003bcb0c8feb0bb13af1a8d2da1e498187ae01ddde0ffb02665b46b3ca145d"} Feb 16 11:35:22 crc kubenswrapper[4949]: I0216 11:35:22.042432 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Feb 16 11:35:22 crc kubenswrapper[4949]: E0216 11:35:22.044878 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:35:23 crc kubenswrapper[4949]: E0216 11:35:23.056087 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.086215 4949 generic.go:334] "Generic (PLEG): container finished" podID="4579a2eb-f9a3-4d32-b67a-d76de7f6a97a" containerID="49365de1663135a492ebbe8a93c51b675de302485b8ad97f462bc2835be75d4b" exitCode=0 Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.086324 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a","Type":"ContainerDied","Data":"49365de1663135a492ebbe8a93c51b675de302485b8ad97f462bc2835be75d4b"} Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.090952 4949 generic.go:334] "Generic (PLEG): container finished" podID="0f715146-edc4-4f1f-80e3-f134d9833f47" containerID="2bd451c81f0ec8f9fb47a11286a0d4725f67f1e8cea74f5539242bca2957d56c" exitCode=0 Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.091013 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"0f715146-edc4-4f1f-80e3-f134d9833f47","Type":"ContainerDied","Data":"2bd451c81f0ec8f9fb47a11286a0d4725f67f1e8cea74f5539242bca2957d56c"} Feb 16 11:35:25 crc kubenswrapper[4949]: E0216 11:35:25.361618 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 16 11:35:25 crc kubenswrapper[4949]: E0216 11:35:25.361698 4949 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 16 11:35:25 crc kubenswrapper[4949]: E0216 11:35:25.361867 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ksbml,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-5lgds_openstack(a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 16 11:35:25 crc kubenswrapper[4949]: E0216 11:35:25.363347 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.611585 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-2" Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.620246 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.736249 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l9xqh\" (UniqueName: \"kubernetes.io/projected/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-kube-api-access-l9xqh\") pod \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\" (UID: \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\") " Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.736378 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/0f715146-edc4-4f1f-80e3-f134d9833f47-rabbitmq-tls\") pod \"0f715146-edc4-4f1f-80e3-f134d9833f47\" (UID: \"0f715146-edc4-4f1f-80e3-f134d9833f47\") " Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.736401 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/0f715146-edc4-4f1f-80e3-f134d9833f47-rabbitmq-confd\") pod \"0f715146-edc4-4f1f-80e3-f134d9833f47\" (UID: \"0f715146-edc4-4f1f-80e3-f134d9833f47\") " Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.736470 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-rabbitmq-erlang-cookie\") pod \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\" (UID: \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\") " Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.736513 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-pod-info\") pod \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\" (UID: \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\") " Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.736548 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0f715146-edc4-4f1f-80e3-f134d9833f47-config-data\") pod \"0f715146-edc4-4f1f-80e3-f134d9833f47\" (UID: \"0f715146-edc4-4f1f-80e3-f134d9833f47\") " Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.736585 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/0f715146-edc4-4f1f-80e3-f134d9833f47-erlang-cookie-secret\") pod \"0f715146-edc4-4f1f-80e3-f134d9833f47\" (UID: \"0f715146-edc4-4f1f-80e3-f134d9833f47\") " Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.736632 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/0f715146-edc4-4f1f-80e3-f134d9833f47-pod-info\") pod \"0f715146-edc4-4f1f-80e3-f134d9833f47\" (UID: \"0f715146-edc4-4f1f-80e3-f134d9833f47\") " Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.736731 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-rabbitmq-tls\") pod \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\" (UID: \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\") " Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.736761 4949 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/0f715146-edc4-4f1f-80e3-f134d9833f47-rabbitmq-plugins\") pod \"0f715146-edc4-4f1f-80e3-f134d9833f47\" (UID: \"0f715146-edc4-4f1f-80e3-f134d9833f47\") " Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.736843 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-erlang-cookie-secret\") pod \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\" (UID: \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\") " Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.736921 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k6l5t\" (UniqueName: \"kubernetes.io/projected/0f715146-edc4-4f1f-80e3-f134d9833f47-kube-api-access-k6l5t\") pod \"0f715146-edc4-4f1f-80e3-f134d9833f47\" (UID: \"0f715146-edc4-4f1f-80e3-f134d9833f47\") " Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.736983 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-config-data\") pod \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\" (UID: \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\") " Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.741922 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-90830775-583e-4ebc-b837-12928d277c58\") pod \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\" (UID: \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\") " Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.742009 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-rabbitmq-confd\") pod \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\" (UID: \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\") " Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.742077 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-rabbitmq-plugins\") pod \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\" (UID: \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\") " Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.742118 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-plugins-conf\") pod \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\" (UID: \"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a\") " Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.742194 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/0f715146-edc4-4f1f-80e3-f134d9833f47-rabbitmq-erlang-cookie\") pod \"0f715146-edc4-4f1f-80e3-f134d9833f47\" (UID: \"0f715146-edc4-4f1f-80e3-f134d9833f47\") " Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.746073 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6f3cf97a-fe2e-4a88-bca4-7f49ec95d9a9\") pod \"0f715146-edc4-4f1f-80e3-f134d9833f47\" (UID: \"0f715146-edc4-4f1f-80e3-f134d9833f47\") " Feb 16 11:35:25 crc kubenswrapper[4949]: 
Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.746144 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-pod-info" (OuterVolumeSpecName: "pod-info") pod "4579a2eb-f9a3-4d32-b67a-d76de7f6a97a" (UID: "4579a2eb-f9a3-4d32-b67a-d76de7f6a97a"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue ""
Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.746278 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/0f715146-edc4-4f1f-80e3-f134d9833f47-plugins-conf\") pod \"0f715146-edc4-4f1f-80e3-f134d9833f47\" (UID: \"0f715146-edc4-4f1f-80e3-f134d9833f47\") "
Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.746320 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/0f715146-edc4-4f1f-80e3-f134d9833f47-server-conf\") pod \"0f715146-edc4-4f1f-80e3-f134d9833f47\" (UID: \"0f715146-edc4-4f1f-80e3-f134d9833f47\") "
Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.747714 4949 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-pod-info\") on node \"crc\" DevicePath \"\""
Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.750429 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "4579a2eb-f9a3-4d32-b67a-d76de7f6a97a" (UID: "4579a2eb-f9a3-4d32-b67a-d76de7f6a97a"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.751914 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0f715146-edc4-4f1f-80e3-f134d9833f47-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "0f715146-edc4-4f1f-80e3-f134d9833f47" (UID: "0f715146-edc4-4f1f-80e3-f134d9833f47"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.752344 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0f715146-edc4-4f1f-80e3-f134d9833f47-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "0f715146-edc4-4f1f-80e3-f134d9833f47" (UID: "0f715146-edc4-4f1f-80e3-f134d9833f47"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.752815 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "4579a2eb-f9a3-4d32-b67a-d76de7f6a97a" (UID: "4579a2eb-f9a3-4d32-b67a-d76de7f6a97a"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.761773 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "4579a2eb-f9a3-4d32-b67a-d76de7f6a97a" (UID: "4579a2eb-f9a3-4d32-b67a-d76de7f6a97a"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.765482 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "4579a2eb-f9a3-4d32-b67a-d76de7f6a97a" (UID: "4579a2eb-f9a3-4d32-b67a-d76de7f6a97a"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.770482 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "4579a2eb-f9a3-4d32-b67a-d76de7f6a97a" (UID: "4579a2eb-f9a3-4d32-b67a-d76de7f6a97a"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.773389 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0f715146-edc4-4f1f-80e3-f134d9833f47-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "0f715146-edc4-4f1f-80e3-f134d9833f47" (UID: "0f715146-edc4-4f1f-80e3-f134d9833f47"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.779513 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0f715146-edc4-4f1f-80e3-f134d9833f47-kube-api-access-k6l5t" (OuterVolumeSpecName: "kube-api-access-k6l5t") pod "0f715146-edc4-4f1f-80e3-f134d9833f47" (UID: "0f715146-edc4-4f1f-80e3-f134d9833f47"). InnerVolumeSpecName "kube-api-access-k6l5t". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.779581 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0f715146-edc4-4f1f-80e3-f134d9833f47-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "0f715146-edc4-4f1f-80e3-f134d9833f47" (UID: "0f715146-edc4-4f1f-80e3-f134d9833f47"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.787925 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0f715146-edc4-4f1f-80e3-f134d9833f47-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "0f715146-edc4-4f1f-80e3-f134d9833f47" (UID: "0f715146-edc4-4f1f-80e3-f134d9833f47"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.789433 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/0f715146-edc4-4f1f-80e3-f134d9833f47-pod-info" (OuterVolumeSpecName: "pod-info") pod "0f715146-edc4-4f1f-80e3-f134d9833f47" (UID: "0f715146-edc4-4f1f-80e3-f134d9833f47"). InnerVolumeSpecName "pod-info". 
PluginName "kubernetes.io/downward-api", VolumeGidValue "" Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.802159 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-kube-api-access-l9xqh" (OuterVolumeSpecName: "kube-api-access-l9xqh") pod "4579a2eb-f9a3-4d32-b67a-d76de7f6a97a" (UID: "4579a2eb-f9a3-4d32-b67a-d76de7f6a97a"). InnerVolumeSpecName "kube-api-access-l9xqh". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.867805 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0f715146-edc4-4f1f-80e3-f134d9833f47-config-data" (OuterVolumeSpecName: "config-data") pod "0f715146-edc4-4f1f-80e3-f134d9833f47" (UID: "0f715146-edc4-4f1f-80e3-f134d9833f47"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.871050 4949 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.871096 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k6l5t\" (UniqueName: \"kubernetes.io/projected/0f715146-edc4-4f1f-80e3-f134d9833f47-kube-api-access-k6l5t\") on node \"crc\" DevicePath \"\"" Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.871110 4949 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.871123 4949 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-plugins-conf\") on node \"crc\" DevicePath \"\"" Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.871137 4949 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/0f715146-edc4-4f1f-80e3-f134d9833f47-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.871147 4949 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/0f715146-edc4-4f1f-80e3-f134d9833f47-plugins-conf\") on node \"crc\" DevicePath \"\"" Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.871159 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l9xqh\" (UniqueName: \"kubernetes.io/projected/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-kube-api-access-l9xqh\") on node \"crc\" DevicePath \"\"" Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.871173 4949 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/0f715146-edc4-4f1f-80e3-f134d9833f47-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.871203 4949 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.871216 4949 reconciler_common.go:293] "Volume detached for volume 
\"config-data\" (UniqueName: \"kubernetes.io/configmap/0f715146-edc4-4f1f-80e3-f134d9833f47-config-data\") on node \"crc\" DevicePath \"\"" Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.871227 4949 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/0f715146-edc4-4f1f-80e3-f134d9833f47-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.871236 4949 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/0f715146-edc4-4f1f-80e3-f134d9833f47-pod-info\") on node \"crc\" DevicePath \"\"" Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.871249 4949 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.871263 4949 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/0f715146-edc4-4f1f-80e3-f134d9833f47-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.967425 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-config-data" (OuterVolumeSpecName: "config-data") pod "4579a2eb-f9a3-4d32-b67a-d76de7f6a97a" (UID: "4579a2eb-f9a3-4d32-b67a-d76de7f6a97a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.967753 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0f715146-edc4-4f1f-80e3-f134d9833f47-server-conf" (OuterVolumeSpecName: "server-conf") pod "0f715146-edc4-4f1f-80e3-f134d9833f47" (UID: "0f715146-edc4-4f1f-80e3-f134d9833f47"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.978320 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-config-data\") on node \"crc\" DevicePath \"\"" Feb 16 11:35:25 crc kubenswrapper[4949]: I0216 11:35:25.978349 4949 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/0f715146-edc4-4f1f-80e3-f134d9833f47-server-conf\") on node \"crc\" DevicePath \"\"" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.010740 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6f3cf97a-fe2e-4a88-bca4-7f49ec95d9a9" (OuterVolumeSpecName: "persistence") pod "0f715146-edc4-4f1f-80e3-f134d9833f47" (UID: "0f715146-edc4-4f1f-80e3-f134d9833f47"). InnerVolumeSpecName "pvc-6f3cf97a-fe2e-4a88-bca4-7f49ec95d9a9". PluginName "kubernetes.io/csi", VolumeGidValue "" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.023987 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-server-conf" (OuterVolumeSpecName: "server-conf") pod "4579a2eb-f9a3-4d32-b67a-d76de7f6a97a" (UID: "4579a2eb-f9a3-4d32-b67a-d76de7f6a97a"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.036070 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-90830775-583e-4ebc-b837-12928d277c58" (OuterVolumeSpecName: "persistence") pod "4579a2eb-f9a3-4d32-b67a-d76de7f6a97a" (UID: "4579a2eb-f9a3-4d32-b67a-d76de7f6a97a"). InnerVolumeSpecName "pvc-90830775-583e-4ebc-b837-12928d277c58". PluginName "kubernetes.io/csi", VolumeGidValue "" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.082160 4949 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-server-conf\") on node \"crc\" DevicePath \"\"" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.082628 4949 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-90830775-583e-4ebc-b837-12928d277c58\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-90830775-583e-4ebc-b837-12928d277c58\") on node \"crc\" " Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.082706 4949 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-6f3cf97a-fe2e-4a88-bca4-7f49ec95d9a9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6f3cf97a-fe2e-4a88-bca4-7f49ec95d9a9\") on node \"crc\" " Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.117902 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-2" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.117931 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"4579a2eb-f9a3-4d32-b67a-d76de7f6a97a","Type":"ContainerDied","Data":"2588f03babc9679c43ce03988cd721bb82dc36c86eeb9933b8dfcd938b9b19ed"} Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.118008 4949 scope.go:117] "RemoveContainer" containerID="49365de1663135a492ebbe8a93c51b675de302485b8ad97f462bc2835be75d4b" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.126093 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"0f715146-edc4-4f1f-80e3-f134d9833f47","Type":"ContainerDied","Data":"fc9e07b8da78c26b558ce3d57c7ccaf3be86c18bddf714d178c8caaeaae7c828"} Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.126221 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.134081 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "4579a2eb-f9a3-4d32-b67a-d76de7f6a97a" (UID: "4579a2eb-f9a3-4d32-b67a-d76de7f6a97a"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.142703 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0f715146-edc4-4f1f-80e3-f134d9833f47-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "0f715146-edc4-4f1f-80e3-f134d9833f47" (UID: "0f715146-edc4-4f1f-80e3-f134d9833f47"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.150012 4949 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.150504 4949 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-90830775-583e-4ebc-b837-12928d277c58" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-90830775-583e-4ebc-b837-12928d277c58") on node "crc" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.150766 4949 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.150921 4949 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-6f3cf97a-fe2e-4a88-bca4-7f49ec95d9a9" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6f3cf97a-fe2e-4a88-bca4-7f49ec95d9a9") on node "crc" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.185768 4949 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/0f715146-edc4-4f1f-80e3-f134d9833f47-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.185964 4949 reconciler_common.go:293] "Volume detached for volume \"pvc-90830775-583e-4ebc-b837-12928d277c58\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-90830775-583e-4ebc-b837-12928d277c58\") on node \"crc\" DevicePath \"\"" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.186051 4949 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.186108 4949 reconciler_common.go:293] "Volume detached for volume \"pvc-6f3cf97a-fe2e-4a88-bca4-7f49ec95d9a9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6f3cf97a-fe2e-4a88-bca4-7f49ec95d9a9\") on node \"crc\" DevicePath \"\"" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.265360 4949 scope.go:117] "RemoveContainer" containerID="28dbd9a3450bc4c9940c72d847e3a36ef6fd5140215944f266e71e0a01581677" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.296826 4949 scope.go:117] "RemoveContainer" containerID="2bd451c81f0ec8f9fb47a11286a0d4725f67f1e8cea74f5539242bca2957d56c" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.355263 4949 scope.go:117] "RemoveContainer" containerID="f75d823174942d793b39e3e51f5a70d2c313e2e41e562884d6fd8299f71d272c" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.477201 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-2"] Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.490881 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-2"] Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.509106 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.534848 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.560555 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-2"] Feb 16 11:35:26 crc kubenswrapper[4949]: 
E0216 11:35:26.561452 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f715146-edc4-4f1f-80e3-f134d9833f47" containerName="rabbitmq" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.561485 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f715146-edc4-4f1f-80e3-f134d9833f47" containerName="rabbitmq" Feb 16 11:35:26 crc kubenswrapper[4949]: E0216 11:35:26.561538 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4579a2eb-f9a3-4d32-b67a-d76de7f6a97a" containerName="setup-container" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.561545 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="4579a2eb-f9a3-4d32-b67a-d76de7f6a97a" containerName="setup-container" Feb 16 11:35:26 crc kubenswrapper[4949]: E0216 11:35:26.561559 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f715146-edc4-4f1f-80e3-f134d9833f47" containerName="setup-container" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.561566 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f715146-edc4-4f1f-80e3-f134d9833f47" containerName="setup-container" Feb 16 11:35:26 crc kubenswrapper[4949]: E0216 11:35:26.561582 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4579a2eb-f9a3-4d32-b67a-d76de7f6a97a" containerName="rabbitmq" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.561588 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="4579a2eb-f9a3-4d32-b67a-d76de7f6a97a" containerName="rabbitmq" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.561898 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f715146-edc4-4f1f-80e3-f134d9833f47" containerName="rabbitmq" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.561939 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="4579a2eb-f9a3-4d32-b67a-d76de7f6a97a" containerName="rabbitmq" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.563640 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-2" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.584685 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-2"] Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.597054 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.599636 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.602294 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.606818 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.606944 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.607033 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.607111 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.607268 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.607289 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-mffhw" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.615261 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.699524 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/701d055f-9bdd-4661-94ac-d8e04866c31f-pod-info\") pod \"rabbitmq-server-2\" (UID: \"701d055f-9bdd-4661-94ac-d8e04866c31f\") " pod="openstack/rabbitmq-server-2" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.699900 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-6f3cf97a-fe2e-4a88-bca4-7f49ec95d9a9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6f3cf97a-fe2e-4a88-bca4-7f49ec95d9a9\") pod \"rabbitmq-cell1-server-0\" (UID: \"a059bd07-34ae-4e84-8ffd-19eb56597b33\") " pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.700141 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/701d055f-9bdd-4661-94ac-d8e04866c31f-rabbitmq-plugins\") pod \"rabbitmq-server-2\" (UID: \"701d055f-9bdd-4661-94ac-d8e04866c31f\") " pod="openstack/rabbitmq-server-2" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.700378 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/701d055f-9bdd-4661-94ac-d8e04866c31f-erlang-cookie-secret\") pod \"rabbitmq-server-2\" (UID: \"701d055f-9bdd-4661-94ac-d8e04866c31f\") " pod="openstack/rabbitmq-server-2" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.700527 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a059bd07-34ae-4e84-8ffd-19eb56597b33-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"a059bd07-34ae-4e84-8ffd-19eb56597b33\") " pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.700706 4949 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/a059bd07-34ae-4e84-8ffd-19eb56597b33-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"a059bd07-34ae-4e84-8ffd-19eb56597b33\") " pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.700826 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/a059bd07-34ae-4e84-8ffd-19eb56597b33-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"a059bd07-34ae-4e84-8ffd-19eb56597b33\") " pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.701004 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/a059bd07-34ae-4e84-8ffd-19eb56597b33-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"a059bd07-34ae-4e84-8ffd-19eb56597b33\") " pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.701147 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/701d055f-9bdd-4661-94ac-d8e04866c31f-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-2\" (UID: \"701d055f-9bdd-4661-94ac-d8e04866c31f\") " pod="openstack/rabbitmq-server-2" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.701353 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/701d055f-9bdd-4661-94ac-d8e04866c31f-config-data\") pod \"rabbitmq-server-2\" (UID: \"701d055f-9bdd-4661-94ac-d8e04866c31f\") " pod="openstack/rabbitmq-server-2" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.701473 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/701d055f-9bdd-4661-94ac-d8e04866c31f-plugins-conf\") pod \"rabbitmq-server-2\" (UID: \"701d055f-9bdd-4661-94ac-d8e04866c31f\") " pod="openstack/rabbitmq-server-2" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.701655 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/a059bd07-34ae-4e84-8ffd-19eb56597b33-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"a059bd07-34ae-4e84-8ffd-19eb56597b33\") " pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.701909 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9mwcz\" (UniqueName: \"kubernetes.io/projected/a059bd07-34ae-4e84-8ffd-19eb56597b33-kube-api-access-9mwcz\") pod \"rabbitmq-cell1-server-0\" (UID: \"a059bd07-34ae-4e84-8ffd-19eb56597b33\") " pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.702036 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/a059bd07-34ae-4e84-8ffd-19eb56597b33-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"a059bd07-34ae-4e84-8ffd-19eb56597b33\") " pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.702223 4949 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/701d055f-9bdd-4661-94ac-d8e04866c31f-rabbitmq-tls\") pod \"rabbitmq-server-2\" (UID: \"701d055f-9bdd-4661-94ac-d8e04866c31f\") " pod="openstack/rabbitmq-server-2" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.702389 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/701d055f-9bdd-4661-94ac-d8e04866c31f-rabbitmq-confd\") pod \"rabbitmq-server-2\" (UID: \"701d055f-9bdd-4661-94ac-d8e04866c31f\") " pod="openstack/rabbitmq-server-2" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.702533 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/a059bd07-34ae-4e84-8ffd-19eb56597b33-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"a059bd07-34ae-4e84-8ffd-19eb56597b33\") " pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.702658 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4wkvj\" (UniqueName: \"kubernetes.io/projected/701d055f-9bdd-4661-94ac-d8e04866c31f-kube-api-access-4wkvj\") pod \"rabbitmq-server-2\" (UID: \"701d055f-9bdd-4661-94ac-d8e04866c31f\") " pod="openstack/rabbitmq-server-2" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.702812 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/a059bd07-34ae-4e84-8ffd-19eb56597b33-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"a059bd07-34ae-4e84-8ffd-19eb56597b33\") " pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.702931 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-90830775-583e-4ebc-b837-12928d277c58\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-90830775-583e-4ebc-b837-12928d277c58\") pod \"rabbitmq-server-2\" (UID: \"701d055f-9bdd-4661-94ac-d8e04866c31f\") " pod="openstack/rabbitmq-server-2" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.703097 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/a059bd07-34ae-4e84-8ffd-19eb56597b33-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"a059bd07-34ae-4e84-8ffd-19eb56597b33\") " pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.703337 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/701d055f-9bdd-4661-94ac-d8e04866c31f-server-conf\") pod \"rabbitmq-server-2\" (UID: \"701d055f-9bdd-4661-94ac-d8e04866c31f\") " pod="openstack/rabbitmq-server-2" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.806147 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9mwcz\" (UniqueName: \"kubernetes.io/projected/a059bd07-34ae-4e84-8ffd-19eb56597b33-kube-api-access-9mwcz\") pod \"rabbitmq-cell1-server-0\" (UID: \"a059bd07-34ae-4e84-8ffd-19eb56597b33\") " pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 
11:35:26.806432 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/a059bd07-34ae-4e84-8ffd-19eb56597b33-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"a059bd07-34ae-4e84-8ffd-19eb56597b33\") " pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.806561 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/701d055f-9bdd-4661-94ac-d8e04866c31f-rabbitmq-tls\") pod \"rabbitmq-server-2\" (UID: \"701d055f-9bdd-4661-94ac-d8e04866c31f\") " pod="openstack/rabbitmq-server-2" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.806651 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/701d055f-9bdd-4661-94ac-d8e04866c31f-rabbitmq-confd\") pod \"rabbitmq-server-2\" (UID: \"701d055f-9bdd-4661-94ac-d8e04866c31f\") " pod="openstack/rabbitmq-server-2" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.806744 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/a059bd07-34ae-4e84-8ffd-19eb56597b33-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"a059bd07-34ae-4e84-8ffd-19eb56597b33\") " pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.806819 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4wkvj\" (UniqueName: \"kubernetes.io/projected/701d055f-9bdd-4661-94ac-d8e04866c31f-kube-api-access-4wkvj\") pod \"rabbitmq-server-2\" (UID: \"701d055f-9bdd-4661-94ac-d8e04866c31f\") " pod="openstack/rabbitmq-server-2" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.806905 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/a059bd07-34ae-4e84-8ffd-19eb56597b33-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"a059bd07-34ae-4e84-8ffd-19eb56597b33\") " pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.806985 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-90830775-583e-4ebc-b837-12928d277c58\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-90830775-583e-4ebc-b837-12928d277c58\") pod \"rabbitmq-server-2\" (UID: \"701d055f-9bdd-4661-94ac-d8e04866c31f\") " pod="openstack/rabbitmq-server-2" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.807071 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/a059bd07-34ae-4e84-8ffd-19eb56597b33-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"a059bd07-34ae-4e84-8ffd-19eb56597b33\") " pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.807173 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/701d055f-9bdd-4661-94ac-d8e04866c31f-server-conf\") pod \"rabbitmq-server-2\" (UID: \"701d055f-9bdd-4661-94ac-d8e04866c31f\") " pod="openstack/rabbitmq-server-2" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.807307 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: 
\"kubernetes.io/downward-api/701d055f-9bdd-4661-94ac-d8e04866c31f-pod-info\") pod \"rabbitmq-server-2\" (UID: \"701d055f-9bdd-4661-94ac-d8e04866c31f\") " pod="openstack/rabbitmq-server-2" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.807411 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-6f3cf97a-fe2e-4a88-bca4-7f49ec95d9a9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6f3cf97a-fe2e-4a88-bca4-7f49ec95d9a9\") pod \"rabbitmq-cell1-server-0\" (UID: \"a059bd07-34ae-4e84-8ffd-19eb56597b33\") " pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.807499 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/701d055f-9bdd-4661-94ac-d8e04866c31f-rabbitmq-plugins\") pod \"rabbitmq-server-2\" (UID: \"701d055f-9bdd-4661-94ac-d8e04866c31f\") " pod="openstack/rabbitmq-server-2" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.807573 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/701d055f-9bdd-4661-94ac-d8e04866c31f-erlang-cookie-secret\") pod \"rabbitmq-server-2\" (UID: \"701d055f-9bdd-4661-94ac-d8e04866c31f\") " pod="openstack/rabbitmq-server-2" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.807640 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a059bd07-34ae-4e84-8ffd-19eb56597b33-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"a059bd07-34ae-4e84-8ffd-19eb56597b33\") " pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.807723 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/a059bd07-34ae-4e84-8ffd-19eb56597b33-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"a059bd07-34ae-4e84-8ffd-19eb56597b33\") " pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.807794 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/a059bd07-34ae-4e84-8ffd-19eb56597b33-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"a059bd07-34ae-4e84-8ffd-19eb56597b33\") " pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.807867 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/a059bd07-34ae-4e84-8ffd-19eb56597b33-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"a059bd07-34ae-4e84-8ffd-19eb56597b33\") " pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.807911 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/a059bd07-34ae-4e84-8ffd-19eb56597b33-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"a059bd07-34ae-4e84-8ffd-19eb56597b33\") " pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.808032 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/701d055f-9bdd-4661-94ac-d8e04866c31f-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-2\" (UID: 
\"701d055f-9bdd-4661-94ac-d8e04866c31f\") " pod="openstack/rabbitmq-server-2" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.808116 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/701d055f-9bdd-4661-94ac-d8e04866c31f-config-data\") pod \"rabbitmq-server-2\" (UID: \"701d055f-9bdd-4661-94ac-d8e04866c31f\") " pod="openstack/rabbitmq-server-2" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.808209 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/701d055f-9bdd-4661-94ac-d8e04866c31f-plugins-conf\") pod \"rabbitmq-server-2\" (UID: \"701d055f-9bdd-4661-94ac-d8e04866c31f\") " pod="openstack/rabbitmq-server-2" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.808304 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/a059bd07-34ae-4e84-8ffd-19eb56597b33-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"a059bd07-34ae-4e84-8ffd-19eb56597b33\") " pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.808666 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/a059bd07-34ae-4e84-8ffd-19eb56597b33-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"a059bd07-34ae-4e84-8ffd-19eb56597b33\") " pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.808985 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/701d055f-9bdd-4661-94ac-d8e04866c31f-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-2\" (UID: \"701d055f-9bdd-4661-94ac-d8e04866c31f\") " pod="openstack/rabbitmq-server-2" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.811304 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/701d055f-9bdd-4661-94ac-d8e04866c31f-rabbitmq-tls\") pod \"rabbitmq-server-2\" (UID: \"701d055f-9bdd-4661-94ac-d8e04866c31f\") " pod="openstack/rabbitmq-server-2" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.811664 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/a059bd07-34ae-4e84-8ffd-19eb56597b33-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"a059bd07-34ae-4e84-8ffd-19eb56597b33\") " pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.812095 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/a059bd07-34ae-4e84-8ffd-19eb56597b33-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"a059bd07-34ae-4e84-8ffd-19eb56597b33\") " pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.812468 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/a059bd07-34ae-4e84-8ffd-19eb56597b33-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"a059bd07-34ae-4e84-8ffd-19eb56597b33\") " pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.812592 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" 
(UniqueName: \"kubernetes.io/configmap/701d055f-9bdd-4661-94ac-d8e04866c31f-server-conf\") pod \"rabbitmq-server-2\" (UID: \"701d055f-9bdd-4661-94ac-d8e04866c31f\") " pod="openstack/rabbitmq-server-2" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.812971 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/701d055f-9bdd-4661-94ac-d8e04866c31f-config-data\") pod \"rabbitmq-server-2\" (UID: \"701d055f-9bdd-4661-94ac-d8e04866c31f\") " pod="openstack/rabbitmq-server-2" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.813081 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/701d055f-9bdd-4661-94ac-d8e04866c31f-plugins-conf\") pod \"rabbitmq-server-2\" (UID: \"701d055f-9bdd-4661-94ac-d8e04866c31f\") " pod="openstack/rabbitmq-server-2" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.813148 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/a059bd07-34ae-4e84-8ffd-19eb56597b33-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"a059bd07-34ae-4e84-8ffd-19eb56597b33\") " pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.813371 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/a059bd07-34ae-4e84-8ffd-19eb56597b33-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"a059bd07-34ae-4e84-8ffd-19eb56597b33\") " pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.813383 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/701d055f-9bdd-4661-94ac-d8e04866c31f-rabbitmq-plugins\") pod \"rabbitmq-server-2\" (UID: \"701d055f-9bdd-4661-94ac-d8e04866c31f\") " pod="openstack/rabbitmq-server-2" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.814143 4949 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.814196 4949 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-6f3cf97a-fe2e-4a88-bca4-7f49ec95d9a9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6f3cf97a-fe2e-4a88-bca4-7f49ec95d9a9\") pod \"rabbitmq-cell1-server-0\" (UID: \"a059bd07-34ae-4e84-8ffd-19eb56597b33\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/f1fa8d0095524c016fc22ddccb6ce776ebb58da8e1711e3d6932ae5bed958a29/globalmount\"" pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.814201 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/a059bd07-34ae-4e84-8ffd-19eb56597b33-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"a059bd07-34ae-4e84-8ffd-19eb56597b33\") " pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.814527 4949 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.814563 4949 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-90830775-583e-4ebc-b837-12928d277c58\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-90830775-583e-4ebc-b837-12928d277c58\") pod \"rabbitmq-server-2\" (UID: \"701d055f-9bdd-4661-94ac-d8e04866c31f\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/109184ac86226b22c0d6666a38f57f93574572ac48e48f16154f0acaca50e7d1/globalmount\"" pod="openstack/rabbitmq-server-2" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.814594 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a059bd07-34ae-4e84-8ffd-19eb56597b33-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"a059bd07-34ae-4e84-8ffd-19eb56597b33\") " pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.815126 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/701d055f-9bdd-4661-94ac-d8e04866c31f-rabbitmq-confd\") pod \"rabbitmq-server-2\" (UID: \"701d055f-9bdd-4661-94ac-d8e04866c31f\") " pod="openstack/rabbitmq-server-2" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.815198 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/701d055f-9bdd-4661-94ac-d8e04866c31f-pod-info\") pod \"rabbitmq-server-2\" (UID: \"701d055f-9bdd-4661-94ac-d8e04866c31f\") " pod="openstack/rabbitmq-server-2" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.816256 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/701d055f-9bdd-4661-94ac-d8e04866c31f-erlang-cookie-secret\") pod \"rabbitmq-server-2\" (UID: \"701d055f-9bdd-4661-94ac-d8e04866c31f\") " pod="openstack/rabbitmq-server-2" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.821873 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9mwcz\" (UniqueName: \"kubernetes.io/projected/a059bd07-34ae-4e84-8ffd-19eb56597b33-kube-api-access-9mwcz\") pod \"rabbitmq-cell1-server-0\" (UID: \"a059bd07-34ae-4e84-8ffd-19eb56597b33\") " pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.824856 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4wkvj\" (UniqueName: \"kubernetes.io/projected/701d055f-9bdd-4661-94ac-d8e04866c31f-kube-api-access-4wkvj\") pod \"rabbitmq-server-2\" (UID: \"701d055f-9bdd-4661-94ac-d8e04866c31f\") " pod="openstack/rabbitmq-server-2" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.885818 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-6f3cf97a-fe2e-4a88-bca4-7f49ec95d9a9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6f3cf97a-fe2e-4a88-bca4-7f49ec95d9a9\") pod \"rabbitmq-cell1-server-0\" (UID: \"a059bd07-34ae-4e84-8ffd-19eb56597b33\") " pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.890776 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-90830775-583e-4ebc-b837-12928d277c58\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-90830775-583e-4ebc-b837-12928d277c58\") pod \"rabbitmq-server-2\" (UID: 
\"701d055f-9bdd-4661-94ac-d8e04866c31f\") " pod="openstack/rabbitmq-server-2" Feb 16 11:35:26 crc kubenswrapper[4949]: I0216 11:35:26.937817 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:35:27 crc kubenswrapper[4949]: I0216 11:35:27.044529 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-68df85789f-bhwkl"] Feb 16 11:35:27 crc kubenswrapper[4949]: I0216 11:35:27.047586 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68df85789f-bhwkl" Feb 16 11:35:27 crc kubenswrapper[4949]: I0216 11:35:27.057717 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam" Feb 16 11:35:27 crc kubenswrapper[4949]: I0216 11:35:27.087257 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-68df85789f-bhwkl"] Feb 16 11:35:27 crc kubenswrapper[4949]: I0216 11:35:27.121082 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb-openstack-edpm-ipam\") pod \"dnsmasq-dns-68df85789f-bhwkl\" (UID: \"09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb\") " pod="openstack/dnsmasq-dns-68df85789f-bhwkl" Feb 16 11:35:27 crc kubenswrapper[4949]: I0216 11:35:27.121213 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb-ovsdbserver-sb\") pod \"dnsmasq-dns-68df85789f-bhwkl\" (UID: \"09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb\") " pod="openstack/dnsmasq-dns-68df85789f-bhwkl" Feb 16 11:35:27 crc kubenswrapper[4949]: I0216 11:35:27.121270 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb-config\") pod \"dnsmasq-dns-68df85789f-bhwkl\" (UID: \"09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb\") " pod="openstack/dnsmasq-dns-68df85789f-bhwkl" Feb 16 11:35:27 crc kubenswrapper[4949]: I0216 11:35:27.121392 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z22b7\" (UniqueName: \"kubernetes.io/projected/09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb-kube-api-access-z22b7\") pod \"dnsmasq-dns-68df85789f-bhwkl\" (UID: \"09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb\") " pod="openstack/dnsmasq-dns-68df85789f-bhwkl" Feb 16 11:35:27 crc kubenswrapper[4949]: I0216 11:35:27.121431 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb-dns-svc\") pod \"dnsmasq-dns-68df85789f-bhwkl\" (UID: \"09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb\") " pod="openstack/dnsmasq-dns-68df85789f-bhwkl" Feb 16 11:35:27 crc kubenswrapper[4949]: I0216 11:35:27.121532 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb-dns-swift-storage-0\") pod \"dnsmasq-dns-68df85789f-bhwkl\" (UID: \"09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb\") " pod="openstack/dnsmasq-dns-68df85789f-bhwkl" Feb 16 11:35:27 crc kubenswrapper[4949]: I0216 11:35:27.121781 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb-ovsdbserver-nb\") pod \"dnsmasq-dns-68df85789f-bhwkl\" (UID: \"09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb\") " pod="openstack/dnsmasq-dns-68df85789f-bhwkl" Feb 16 11:35:27 crc kubenswrapper[4949]: I0216 11:35:27.185485 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-2" Feb 16 11:35:27 crc kubenswrapper[4949]: I0216 11:35:27.224351 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb-ovsdbserver-nb\") pod \"dnsmasq-dns-68df85789f-bhwkl\" (UID: \"09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb\") " pod="openstack/dnsmasq-dns-68df85789f-bhwkl" Feb 16 11:35:27 crc kubenswrapper[4949]: I0216 11:35:27.224510 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb-openstack-edpm-ipam\") pod \"dnsmasq-dns-68df85789f-bhwkl\" (UID: \"09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb\") " pod="openstack/dnsmasq-dns-68df85789f-bhwkl" Feb 16 11:35:27 crc kubenswrapper[4949]: I0216 11:35:27.224559 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb-ovsdbserver-sb\") pod \"dnsmasq-dns-68df85789f-bhwkl\" (UID: \"09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb\") " pod="openstack/dnsmasq-dns-68df85789f-bhwkl" Feb 16 11:35:27 crc kubenswrapper[4949]: I0216 11:35:27.224581 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb-config\") pod \"dnsmasq-dns-68df85789f-bhwkl\" (UID: \"09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb\") " pod="openstack/dnsmasq-dns-68df85789f-bhwkl" Feb 16 11:35:27 crc kubenswrapper[4949]: I0216 11:35:27.224636 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z22b7\" (UniqueName: \"kubernetes.io/projected/09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb-kube-api-access-z22b7\") pod \"dnsmasq-dns-68df85789f-bhwkl\" (UID: \"09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb\") " pod="openstack/dnsmasq-dns-68df85789f-bhwkl" Feb 16 11:35:27 crc kubenswrapper[4949]: I0216 11:35:27.224657 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb-dns-svc\") pod \"dnsmasq-dns-68df85789f-bhwkl\" (UID: \"09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb\") " pod="openstack/dnsmasq-dns-68df85789f-bhwkl" Feb 16 11:35:27 crc kubenswrapper[4949]: I0216 11:35:27.224694 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb-dns-swift-storage-0\") pod \"dnsmasq-dns-68df85789f-bhwkl\" (UID: \"09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb\") " pod="openstack/dnsmasq-dns-68df85789f-bhwkl" Feb 16 11:35:27 crc kubenswrapper[4949]: I0216 11:35:27.225602 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb-ovsdbserver-nb\") pod \"dnsmasq-dns-68df85789f-bhwkl\" (UID: \"09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb\") " pod="openstack/dnsmasq-dns-68df85789f-bhwkl" Feb 16 11:35:27 crc 
kubenswrapper[4949]: I0216 11:35:27.225835 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb-dns-svc\") pod \"dnsmasq-dns-68df85789f-bhwkl\" (UID: \"09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb\") " pod="openstack/dnsmasq-dns-68df85789f-bhwkl" Feb 16 11:35:27 crc kubenswrapper[4949]: I0216 11:35:27.225844 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb-config\") pod \"dnsmasq-dns-68df85789f-bhwkl\" (UID: \"09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb\") " pod="openstack/dnsmasq-dns-68df85789f-bhwkl" Feb 16 11:35:27 crc kubenswrapper[4949]: I0216 11:35:27.226250 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb-dns-swift-storage-0\") pod \"dnsmasq-dns-68df85789f-bhwkl\" (UID: \"09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb\") " pod="openstack/dnsmasq-dns-68df85789f-bhwkl" Feb 16 11:35:27 crc kubenswrapper[4949]: I0216 11:35:27.226522 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb-ovsdbserver-sb\") pod \"dnsmasq-dns-68df85789f-bhwkl\" (UID: \"09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb\") " pod="openstack/dnsmasq-dns-68df85789f-bhwkl" Feb 16 11:35:27 crc kubenswrapper[4949]: I0216 11:35:27.234891 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb-openstack-edpm-ipam\") pod \"dnsmasq-dns-68df85789f-bhwkl\" (UID: \"09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb\") " pod="openstack/dnsmasq-dns-68df85789f-bhwkl" Feb 16 11:35:27 crc kubenswrapper[4949]: I0216 11:35:27.243829 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z22b7\" (UniqueName: \"kubernetes.io/projected/09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb-kube-api-access-z22b7\") pod \"dnsmasq-dns-68df85789f-bhwkl\" (UID: \"09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb\") " pod="openstack/dnsmasq-dns-68df85789f-bhwkl" Feb 16 11:35:27 crc kubenswrapper[4949]: I0216 11:35:27.265363 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0f715146-edc4-4f1f-80e3-f134d9833f47" path="/var/lib/kubelet/pods/0f715146-edc4-4f1f-80e3-f134d9833f47/volumes" Feb 16 11:35:27 crc kubenswrapper[4949]: I0216 11:35:27.266813 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4579a2eb-f9a3-4d32-b67a-d76de7f6a97a" path="/var/lib/kubelet/pods/4579a2eb-f9a3-4d32-b67a-d76de7f6a97a/volumes" Feb 16 11:35:27 crc kubenswrapper[4949]: I0216 11:35:27.432040 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-68df85789f-bhwkl" Feb 16 11:35:27 crc kubenswrapper[4949]: I0216 11:35:27.615686 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Feb 16 11:35:27 crc kubenswrapper[4949]: I0216 11:35:27.825317 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-2"] Feb 16 11:35:28 crc kubenswrapper[4949]: W0216 11:35:28.047342 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod09d30ee5_4073_4e9e_9a7e_db3c8bfb76eb.slice/crio-aa7f4cbaba229c6f622cac45339c06082e9df6506eb2328c72ffd64e25b9b088 WatchSource:0}: Error finding container aa7f4cbaba229c6f622cac45339c06082e9df6506eb2328c72ffd64e25b9b088: Status 404 returned error can't find the container with id aa7f4cbaba229c6f622cac45339c06082e9df6506eb2328c72ffd64e25b9b088 Feb 16 11:35:28 crc kubenswrapper[4949]: I0216 11:35:28.051610 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-68df85789f-bhwkl"] Feb 16 11:35:28 crc kubenswrapper[4949]: I0216 11:35:28.240469 4949 scope.go:117] "RemoveContainer" containerID="fcc25fdabb7245fb2b21fe157daaa0479c92c3c1e231adb091c93ee7fe6b8437" Feb 16 11:35:28 crc kubenswrapper[4949]: E0216 11:35:28.241029 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:35:28 crc kubenswrapper[4949]: I0216 11:35:28.259489 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"a059bd07-34ae-4e84-8ffd-19eb56597b33","Type":"ContainerStarted","Data":"02c0641be7f42f482a963c9a71a4c19391c095dd5ee061981ed1847450d5f1b4"} Feb 16 11:35:28 crc kubenswrapper[4949]: I0216 11:35:28.266072 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"701d055f-9bdd-4661-94ac-d8e04866c31f","Type":"ContainerStarted","Data":"8991ce8986fdb975118701c29cb513b5740f2a38569458a17fbfc75dee5890a7"} Feb 16 11:35:28 crc kubenswrapper[4949]: I0216 11:35:28.268699 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68df85789f-bhwkl" event={"ID":"09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb","Type":"ContainerStarted","Data":"aa7f4cbaba229c6f622cac45339c06082e9df6506eb2328c72ffd64e25b9b088"} Feb 16 11:35:29 crc kubenswrapper[4949]: I0216 11:35:29.280664 4949 generic.go:334] "Generic (PLEG): container finished" podID="09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb" containerID="10766010cab785a44b45bd450eed66e091de03d9b31958643f304d9e9d741bd1" exitCode=0 Feb 16 11:35:29 crc kubenswrapper[4949]: I0216 11:35:29.280719 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68df85789f-bhwkl" event={"ID":"09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb","Type":"ContainerDied","Data":"10766010cab785a44b45bd450eed66e091de03d9b31958643f304d9e9d741bd1"} Feb 16 11:35:30 crc kubenswrapper[4949]: I0216 11:35:30.307775 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" 
event={"ID":"a059bd07-34ae-4e84-8ffd-19eb56597b33","Type":"ContainerStarted","Data":"5f9f939393d6aa3c1fbd961f8241f6f5fdf210bdf0827a4722494776fe97647b"} Feb 16 11:35:30 crc kubenswrapper[4949]: I0216 11:35:30.318234 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"701d055f-9bdd-4661-94ac-d8e04866c31f","Type":"ContainerStarted","Data":"3ff9f90f525774fe3780afea7afb287b758873d939b997e44c3a79ce1e13aca6"} Feb 16 11:35:30 crc kubenswrapper[4949]: I0216 11:35:30.322658 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68df85789f-bhwkl" event={"ID":"09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb","Type":"ContainerStarted","Data":"01a7a6393fc686b9b7f70c486ea4e942e950aa0da1cfc528ff493eef314e4238"} Feb 16 11:35:30 crc kubenswrapper[4949]: I0216 11:35:30.324292 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-68df85789f-bhwkl" Feb 16 11:35:30 crc kubenswrapper[4949]: I0216 11:35:30.408956 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-68df85789f-bhwkl" podStartSLOduration=4.408927219 podStartE2EDuration="4.408927219s" podCreationTimestamp="2026-02-16 11:35:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:35:30.395283268 +0000 UTC m=+1720.024617473" watchObservedRunningTime="2026-02-16 11:35:30.408927219 +0000 UTC m=+1720.038261384" Feb 16 11:35:37 crc kubenswrapper[4949]: I0216 11:35:37.250220 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Feb 16 11:35:37 crc kubenswrapper[4949]: E0216 11:35:37.379814 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 16 11:35:37 crc kubenswrapper[4949]: E0216 11:35:37.379892 4949 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 16 11:35:37 crc kubenswrapper[4949]: E0216 11:35:37.380038 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n674h5dh7bh65bhcch65chc4h547h5d4h5c7h5dch5c8h74hb9h5f4hd8h79h7h59bh559h56bh9bhbch67bh68bh575h5cbh658h5bch7bhcch5d9q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8k7p7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(c69d7379-6f2b-45ae-8972-71e223a337a8): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 16 11:35:37 crc kubenswrapper[4949]: E0216 11:35:37.381240 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:35:37 crc kubenswrapper[4949]: E0216 11:35:37.417654 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:35:37 crc kubenswrapper[4949]: I0216 11:35:37.434355 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-68df85789f-bhwkl" Feb 16 11:35:37 crc kubenswrapper[4949]: I0216 11:35:37.534824 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-79b5d74c8c-2qhff"] Feb 16 11:35:37 crc kubenswrapper[4949]: I0216 11:35:37.535129 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-79b5d74c8c-2qhff" podUID="d0fc7610-06f6-47af-b194-113413f5b260" containerName="dnsmasq-dns" containerID="cri-o://4ab2b185c205e787e685b3c44125afa806f32f996c5415f9817dd20fdb69d082" gracePeriod=10 Feb 16 11:35:37 crc kubenswrapper[4949]: I0216 11:35:37.724936 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-bb85b8995-fbntz"] Feb 16 11:35:37 crc kubenswrapper[4949]: I0216 11:35:37.728082 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bb85b8995-fbntz" Feb 16 11:35:37 crc kubenswrapper[4949]: I0216 11:35:37.758853 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/922556c8-4241-4be1-99cd-66eda9892b23-config\") pod \"dnsmasq-dns-bb85b8995-fbntz\" (UID: \"922556c8-4241-4be1-99cd-66eda9892b23\") " pod="openstack/dnsmasq-dns-bb85b8995-fbntz" Feb 16 11:35:37 crc kubenswrapper[4949]: I0216 11:35:37.759059 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x2m84\" (UniqueName: \"kubernetes.io/projected/922556c8-4241-4be1-99cd-66eda9892b23-kube-api-access-x2m84\") pod \"dnsmasq-dns-bb85b8995-fbntz\" (UID: \"922556c8-4241-4be1-99cd-66eda9892b23\") " pod="openstack/dnsmasq-dns-bb85b8995-fbntz" Feb 16 11:35:37 crc kubenswrapper[4949]: I0216 11:35:37.759139 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/922556c8-4241-4be1-99cd-66eda9892b23-ovsdbserver-nb\") pod \"dnsmasq-dns-bb85b8995-fbntz\" (UID: \"922556c8-4241-4be1-99cd-66eda9892b23\") " pod="openstack/dnsmasq-dns-bb85b8995-fbntz" Feb 16 11:35:37 crc kubenswrapper[4949]: I0216 11:35:37.759252 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/922556c8-4241-4be1-99cd-66eda9892b23-dns-svc\") pod \"dnsmasq-dns-bb85b8995-fbntz\" (UID: \"922556c8-4241-4be1-99cd-66eda9892b23\") " pod="openstack/dnsmasq-dns-bb85b8995-fbntz" Feb 16 11:35:37 crc kubenswrapper[4949]: I0216 11:35:37.759269 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/922556c8-4241-4be1-99cd-66eda9892b23-ovsdbserver-sb\") pod \"dnsmasq-dns-bb85b8995-fbntz\" (UID: \"922556c8-4241-4be1-99cd-66eda9892b23\") " 
pod="openstack/dnsmasq-dns-bb85b8995-fbntz" Feb 16 11:35:37 crc kubenswrapper[4949]: I0216 11:35:37.759298 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/922556c8-4241-4be1-99cd-66eda9892b23-openstack-edpm-ipam\") pod \"dnsmasq-dns-bb85b8995-fbntz\" (UID: \"922556c8-4241-4be1-99cd-66eda9892b23\") " pod="openstack/dnsmasq-dns-bb85b8995-fbntz" Feb 16 11:35:37 crc kubenswrapper[4949]: I0216 11:35:37.759433 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/922556c8-4241-4be1-99cd-66eda9892b23-dns-swift-storage-0\") pod \"dnsmasq-dns-bb85b8995-fbntz\" (UID: \"922556c8-4241-4be1-99cd-66eda9892b23\") " pod="openstack/dnsmasq-dns-bb85b8995-fbntz" Feb 16 11:35:37 crc kubenswrapper[4949]: I0216 11:35:37.769258 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bb85b8995-fbntz"] Feb 16 11:35:37 crc kubenswrapper[4949]: I0216 11:35:37.864336 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x2m84\" (UniqueName: \"kubernetes.io/projected/922556c8-4241-4be1-99cd-66eda9892b23-kube-api-access-x2m84\") pod \"dnsmasq-dns-bb85b8995-fbntz\" (UID: \"922556c8-4241-4be1-99cd-66eda9892b23\") " pod="openstack/dnsmasq-dns-bb85b8995-fbntz" Feb 16 11:35:37 crc kubenswrapper[4949]: I0216 11:35:37.864448 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/922556c8-4241-4be1-99cd-66eda9892b23-ovsdbserver-nb\") pod \"dnsmasq-dns-bb85b8995-fbntz\" (UID: \"922556c8-4241-4be1-99cd-66eda9892b23\") " pod="openstack/dnsmasq-dns-bb85b8995-fbntz" Feb 16 11:35:37 crc kubenswrapper[4949]: I0216 11:35:37.864506 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/922556c8-4241-4be1-99cd-66eda9892b23-dns-svc\") pod \"dnsmasq-dns-bb85b8995-fbntz\" (UID: \"922556c8-4241-4be1-99cd-66eda9892b23\") " pod="openstack/dnsmasq-dns-bb85b8995-fbntz" Feb 16 11:35:37 crc kubenswrapper[4949]: I0216 11:35:37.864526 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/922556c8-4241-4be1-99cd-66eda9892b23-ovsdbserver-sb\") pod \"dnsmasq-dns-bb85b8995-fbntz\" (UID: \"922556c8-4241-4be1-99cd-66eda9892b23\") " pod="openstack/dnsmasq-dns-bb85b8995-fbntz" Feb 16 11:35:37 crc kubenswrapper[4949]: I0216 11:35:37.864547 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/922556c8-4241-4be1-99cd-66eda9892b23-openstack-edpm-ipam\") pod \"dnsmasq-dns-bb85b8995-fbntz\" (UID: \"922556c8-4241-4be1-99cd-66eda9892b23\") " pod="openstack/dnsmasq-dns-bb85b8995-fbntz" Feb 16 11:35:37 crc kubenswrapper[4949]: I0216 11:35:37.864618 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/922556c8-4241-4be1-99cd-66eda9892b23-dns-swift-storage-0\") pod \"dnsmasq-dns-bb85b8995-fbntz\" (UID: \"922556c8-4241-4be1-99cd-66eda9892b23\") " pod="openstack/dnsmasq-dns-bb85b8995-fbntz" Feb 16 11:35:37 crc kubenswrapper[4949]: I0216 11:35:37.864705 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/922556c8-4241-4be1-99cd-66eda9892b23-config\") pod \"dnsmasq-dns-bb85b8995-fbntz\" (UID: \"922556c8-4241-4be1-99cd-66eda9892b23\") " pod="openstack/dnsmasq-dns-bb85b8995-fbntz" Feb 16 11:35:37 crc kubenswrapper[4949]: I0216 11:35:37.865863 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/922556c8-4241-4be1-99cd-66eda9892b23-config\") pod \"dnsmasq-dns-bb85b8995-fbntz\" (UID: \"922556c8-4241-4be1-99cd-66eda9892b23\") " pod="openstack/dnsmasq-dns-bb85b8995-fbntz" Feb 16 11:35:37 crc kubenswrapper[4949]: I0216 11:35:37.866732 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/922556c8-4241-4be1-99cd-66eda9892b23-dns-swift-storage-0\") pod \"dnsmasq-dns-bb85b8995-fbntz\" (UID: \"922556c8-4241-4be1-99cd-66eda9892b23\") " pod="openstack/dnsmasq-dns-bb85b8995-fbntz" Feb 16 11:35:37 crc kubenswrapper[4949]: I0216 11:35:37.866901 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/922556c8-4241-4be1-99cd-66eda9892b23-ovsdbserver-nb\") pod \"dnsmasq-dns-bb85b8995-fbntz\" (UID: \"922556c8-4241-4be1-99cd-66eda9892b23\") " pod="openstack/dnsmasq-dns-bb85b8995-fbntz" Feb 16 11:35:37 crc kubenswrapper[4949]: I0216 11:35:37.867062 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/922556c8-4241-4be1-99cd-66eda9892b23-ovsdbserver-sb\") pod \"dnsmasq-dns-bb85b8995-fbntz\" (UID: \"922556c8-4241-4be1-99cd-66eda9892b23\") " pod="openstack/dnsmasq-dns-bb85b8995-fbntz" Feb 16 11:35:37 crc kubenswrapper[4949]: I0216 11:35:37.867571 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/922556c8-4241-4be1-99cd-66eda9892b23-openstack-edpm-ipam\") pod \"dnsmasq-dns-bb85b8995-fbntz\" (UID: \"922556c8-4241-4be1-99cd-66eda9892b23\") " pod="openstack/dnsmasq-dns-bb85b8995-fbntz" Feb 16 11:35:37 crc kubenswrapper[4949]: I0216 11:35:37.868201 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/922556c8-4241-4be1-99cd-66eda9892b23-dns-svc\") pod \"dnsmasq-dns-bb85b8995-fbntz\" (UID: \"922556c8-4241-4be1-99cd-66eda9892b23\") " pod="openstack/dnsmasq-dns-bb85b8995-fbntz" Feb 16 11:35:37 crc kubenswrapper[4949]: I0216 11:35:37.891429 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x2m84\" (UniqueName: \"kubernetes.io/projected/922556c8-4241-4be1-99cd-66eda9892b23-kube-api-access-x2m84\") pod \"dnsmasq-dns-bb85b8995-fbntz\" (UID: \"922556c8-4241-4be1-99cd-66eda9892b23\") " pod="openstack/dnsmasq-dns-bb85b8995-fbntz" Feb 16 11:35:38 crc kubenswrapper[4949]: I0216 11:35:38.093195 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bb85b8995-fbntz" Feb 16 11:35:38 crc kubenswrapper[4949]: I0216 11:35:38.366022 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-79b5d74c8c-2qhff" Feb 16 11:35:38 crc kubenswrapper[4949]: I0216 11:35:38.386982 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d0fc7610-06f6-47af-b194-113413f5b260-dns-swift-storage-0\") pod \"d0fc7610-06f6-47af-b194-113413f5b260\" (UID: \"d0fc7610-06f6-47af-b194-113413f5b260\") " Feb 16 11:35:38 crc kubenswrapper[4949]: I0216 11:35:38.387083 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d0fc7610-06f6-47af-b194-113413f5b260-ovsdbserver-nb\") pod \"d0fc7610-06f6-47af-b194-113413f5b260\" (UID: \"d0fc7610-06f6-47af-b194-113413f5b260\") " Feb 16 11:35:38 crc kubenswrapper[4949]: I0216 11:35:38.387118 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d0fc7610-06f6-47af-b194-113413f5b260-ovsdbserver-sb\") pod \"d0fc7610-06f6-47af-b194-113413f5b260\" (UID: \"d0fc7610-06f6-47af-b194-113413f5b260\") " Feb 16 11:35:38 crc kubenswrapper[4949]: I0216 11:35:38.387309 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m298n\" (UniqueName: \"kubernetes.io/projected/d0fc7610-06f6-47af-b194-113413f5b260-kube-api-access-m298n\") pod \"d0fc7610-06f6-47af-b194-113413f5b260\" (UID: \"d0fc7610-06f6-47af-b194-113413f5b260\") " Feb 16 11:35:38 crc kubenswrapper[4949]: I0216 11:35:38.387353 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d0fc7610-06f6-47af-b194-113413f5b260-config\") pod \"d0fc7610-06f6-47af-b194-113413f5b260\" (UID: \"d0fc7610-06f6-47af-b194-113413f5b260\") " Feb 16 11:35:38 crc kubenswrapper[4949]: I0216 11:35:38.387395 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d0fc7610-06f6-47af-b194-113413f5b260-dns-svc\") pod \"d0fc7610-06f6-47af-b194-113413f5b260\" (UID: \"d0fc7610-06f6-47af-b194-113413f5b260\") " Feb 16 11:35:38 crc kubenswrapper[4949]: I0216 11:35:38.413559 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d0fc7610-06f6-47af-b194-113413f5b260-kube-api-access-m298n" (OuterVolumeSpecName: "kube-api-access-m298n") pod "d0fc7610-06f6-47af-b194-113413f5b260" (UID: "d0fc7610-06f6-47af-b194-113413f5b260"). InnerVolumeSpecName "kube-api-access-m298n". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:35:38 crc kubenswrapper[4949]: I0216 11:35:38.478211 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d0fc7610-06f6-47af-b194-113413f5b260-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "d0fc7610-06f6-47af-b194-113413f5b260" (UID: "d0fc7610-06f6-47af-b194-113413f5b260"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:35:38 crc kubenswrapper[4949]: I0216 11:35:38.491735 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m298n\" (UniqueName: \"kubernetes.io/projected/d0fc7610-06f6-47af-b194-113413f5b260-kube-api-access-m298n\") on node \"crc\" DevicePath \"\"" Feb 16 11:35:38 crc kubenswrapper[4949]: I0216 11:35:38.491760 4949 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d0fc7610-06f6-47af-b194-113413f5b260-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Feb 16 11:35:38 crc kubenswrapper[4949]: I0216 11:35:38.516650 4949 generic.go:334] "Generic (PLEG): container finished" podID="d0fc7610-06f6-47af-b194-113413f5b260" containerID="4ab2b185c205e787e685b3c44125afa806f32f996c5415f9817dd20fdb69d082" exitCode=0 Feb 16 11:35:38 crc kubenswrapper[4949]: I0216 11:35:38.516710 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79b5d74c8c-2qhff" event={"ID":"d0fc7610-06f6-47af-b194-113413f5b260","Type":"ContainerDied","Data":"4ab2b185c205e787e685b3c44125afa806f32f996c5415f9817dd20fdb69d082"} Feb 16 11:35:38 crc kubenswrapper[4949]: I0216 11:35:38.516748 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79b5d74c8c-2qhff" event={"ID":"d0fc7610-06f6-47af-b194-113413f5b260","Type":"ContainerDied","Data":"c6cc995d9c48c00761150b83ac1f350cbf78ad70801937d6318ea52b1c0b0fb1"} Feb 16 11:35:38 crc kubenswrapper[4949]: I0216 11:35:38.516776 4949 scope.go:117] "RemoveContainer" containerID="4ab2b185c205e787e685b3c44125afa806f32f996c5415f9817dd20fdb69d082" Feb 16 11:35:38 crc kubenswrapper[4949]: I0216 11:35:38.517063 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-79b5d74c8c-2qhff" Feb 16 11:35:38 crc kubenswrapper[4949]: I0216 11:35:38.587031 4949 scope.go:117] "RemoveContainer" containerID="e255728c952daf3776e54e8bb2d723dbdf9bd956a0aa89399b23dc15afbe84f2" Feb 16 11:35:38 crc kubenswrapper[4949]: I0216 11:35:38.591662 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d0fc7610-06f6-47af-b194-113413f5b260-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "d0fc7610-06f6-47af-b194-113413f5b260" (UID: "d0fc7610-06f6-47af-b194-113413f5b260"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:35:38 crc kubenswrapper[4949]: I0216 11:35:38.595401 4949 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d0fc7610-06f6-47af-b194-113413f5b260-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Feb 16 11:35:38 crc kubenswrapper[4949]: I0216 11:35:38.663289 4949 scope.go:117] "RemoveContainer" containerID="4ab2b185c205e787e685b3c44125afa806f32f996c5415f9817dd20fdb69d082" Feb 16 11:35:38 crc kubenswrapper[4949]: E0216 11:35:38.667716 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4ab2b185c205e787e685b3c44125afa806f32f996c5415f9817dd20fdb69d082\": container with ID starting with 4ab2b185c205e787e685b3c44125afa806f32f996c5415f9817dd20fdb69d082 not found: ID does not exist" containerID="4ab2b185c205e787e685b3c44125afa806f32f996c5415f9817dd20fdb69d082" Feb 16 11:35:38 crc kubenswrapper[4949]: I0216 11:35:38.667764 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ab2b185c205e787e685b3c44125afa806f32f996c5415f9817dd20fdb69d082"} err="failed to get container status \"4ab2b185c205e787e685b3c44125afa806f32f996c5415f9817dd20fdb69d082\": rpc error: code = NotFound desc = could not find container \"4ab2b185c205e787e685b3c44125afa806f32f996c5415f9817dd20fdb69d082\": container with ID starting with 4ab2b185c205e787e685b3c44125afa806f32f996c5415f9817dd20fdb69d082 not found: ID does not exist" Feb 16 11:35:38 crc kubenswrapper[4949]: I0216 11:35:38.667795 4949 scope.go:117] "RemoveContainer" containerID="e255728c952daf3776e54e8bb2d723dbdf9bd956a0aa89399b23dc15afbe84f2" Feb 16 11:35:38 crc kubenswrapper[4949]: I0216 11:35:38.675733 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d0fc7610-06f6-47af-b194-113413f5b260-config" (OuterVolumeSpecName: "config") pod "d0fc7610-06f6-47af-b194-113413f5b260" (UID: "d0fc7610-06f6-47af-b194-113413f5b260"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:35:38 crc kubenswrapper[4949]: E0216 11:35:38.677750 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e255728c952daf3776e54e8bb2d723dbdf9bd956a0aa89399b23dc15afbe84f2\": container with ID starting with e255728c952daf3776e54e8bb2d723dbdf9bd956a0aa89399b23dc15afbe84f2 not found: ID does not exist" containerID="e255728c952daf3776e54e8bb2d723dbdf9bd956a0aa89399b23dc15afbe84f2" Feb 16 11:35:38 crc kubenswrapper[4949]: I0216 11:35:38.677795 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e255728c952daf3776e54e8bb2d723dbdf9bd956a0aa89399b23dc15afbe84f2"} err="failed to get container status \"e255728c952daf3776e54e8bb2d723dbdf9bd956a0aa89399b23dc15afbe84f2\": rpc error: code = NotFound desc = could not find container \"e255728c952daf3776e54e8bb2d723dbdf9bd956a0aa89399b23dc15afbe84f2\": container with ID starting with e255728c952daf3776e54e8bb2d723dbdf9bd956a0aa89399b23dc15afbe84f2 not found: ID does not exist" Feb 16 11:35:38 crc kubenswrapper[4949]: I0216 11:35:38.680042 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d0fc7610-06f6-47af-b194-113413f5b260-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d0fc7610-06f6-47af-b194-113413f5b260" (UID: "d0fc7610-06f6-47af-b194-113413f5b260"). 
InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:35:38 crc kubenswrapper[4949]: I0216 11:35:38.702094 4949 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d0fc7610-06f6-47af-b194-113413f5b260-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:35:38 crc kubenswrapper[4949]: I0216 11:35:38.702381 4949 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d0fc7610-06f6-47af-b194-113413f5b260-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 16 11:35:38 crc kubenswrapper[4949]: I0216 11:35:38.761849 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bb85b8995-fbntz"] Feb 16 11:35:38 crc kubenswrapper[4949]: I0216 11:35:38.762730 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d0fc7610-06f6-47af-b194-113413f5b260-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "d0fc7610-06f6-47af-b194-113413f5b260" (UID: "d0fc7610-06f6-47af-b194-113413f5b260"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:35:38 crc kubenswrapper[4949]: I0216 11:35:38.805373 4949 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d0fc7610-06f6-47af-b194-113413f5b260-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Feb 16 11:35:39 crc kubenswrapper[4949]: I0216 11:35:39.012341 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-79b5d74c8c-2qhff"] Feb 16 11:35:39 crc kubenswrapper[4949]: I0216 11:35:39.031317 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-79b5d74c8c-2qhff"] Feb 16 11:35:39 crc kubenswrapper[4949]: E0216 11:35:39.239101 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:35:39 crc kubenswrapper[4949]: I0216 11:35:39.275579 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d0fc7610-06f6-47af-b194-113413f5b260" path="/var/lib/kubelet/pods/d0fc7610-06f6-47af-b194-113413f5b260/volumes" Feb 16 11:35:39 crc kubenswrapper[4949]: I0216 11:35:39.534782 4949 generic.go:334] "Generic (PLEG): container finished" podID="922556c8-4241-4be1-99cd-66eda9892b23" containerID="29e7b86896573608e932c9c92b35f4797e446b3654d2697245062c893b9d1546" exitCode=0 Feb 16 11:35:39 crc kubenswrapper[4949]: I0216 11:35:39.534832 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bb85b8995-fbntz" event={"ID":"922556c8-4241-4be1-99cd-66eda9892b23","Type":"ContainerDied","Data":"29e7b86896573608e932c9c92b35f4797e446b3654d2697245062c893b9d1546"} Feb 16 11:35:39 crc kubenswrapper[4949]: I0216 11:35:39.535109 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bb85b8995-fbntz" event={"ID":"922556c8-4241-4be1-99cd-66eda9892b23","Type":"ContainerStarted","Data":"df375bc1aee7a5ba1d452144bc95e09b3fca8caede19c57e7c77c20db00434cc"} Feb 16 11:35:40 crc kubenswrapper[4949]: I0216 11:35:40.548659 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bb85b8995-fbntz" 
event={"ID":"922556c8-4241-4be1-99cd-66eda9892b23","Type":"ContainerStarted","Data":"54ae49724a37c31d08dc1897c636e9f7a51b6663639fdd54e869dc876d282893"} Feb 16 11:35:40 crc kubenswrapper[4949]: I0216 11:35:40.548988 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-bb85b8995-fbntz" Feb 16 11:35:40 crc kubenswrapper[4949]: I0216 11:35:40.579596 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-bb85b8995-fbntz" podStartSLOduration=3.579566841 podStartE2EDuration="3.579566841s" podCreationTimestamp="2026-02-16 11:35:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:35:40.571964653 +0000 UTC m=+1730.201298818" watchObservedRunningTime="2026-02-16 11:35:40.579566841 +0000 UTC m=+1730.208901026" Feb 16 11:35:41 crc kubenswrapper[4949]: I0216 11:35:41.251952 4949 scope.go:117] "RemoveContainer" containerID="fcc25fdabb7245fb2b21fe157daaa0479c92c3c1e231adb091c93ee7fe6b8437" Feb 16 11:35:41 crc kubenswrapper[4949]: E0216 11:35:41.252275 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:35:48 crc kubenswrapper[4949]: I0216 11:35:48.095094 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-bb85b8995-fbntz" Feb 16 11:35:48 crc kubenswrapper[4949]: I0216 11:35:48.194555 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-68df85789f-bhwkl"] Feb 16 11:35:48 crc kubenswrapper[4949]: I0216 11:35:48.194833 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-68df85789f-bhwkl" podUID="09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb" containerName="dnsmasq-dns" containerID="cri-o://01a7a6393fc686b9b7f70c486ea4e942e950aa0da1cfc528ff493eef314e4238" gracePeriod=10 Feb 16 11:35:48 crc kubenswrapper[4949]: I0216 11:35:48.652575 4949 generic.go:334] "Generic (PLEG): container finished" podID="09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb" containerID="01a7a6393fc686b9b7f70c486ea4e942e950aa0da1cfc528ff493eef314e4238" exitCode=0 Feb 16 11:35:48 crc kubenswrapper[4949]: I0216 11:35:48.652947 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68df85789f-bhwkl" event={"ID":"09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb","Type":"ContainerDied","Data":"01a7a6393fc686b9b7f70c486ea4e942e950aa0da1cfc528ff493eef314e4238"} Feb 16 11:35:48 crc kubenswrapper[4949]: I0216 11:35:48.847767 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-68df85789f-bhwkl" Feb 16 11:35:49 crc kubenswrapper[4949]: I0216 11:35:49.001239 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z22b7\" (UniqueName: \"kubernetes.io/projected/09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb-kube-api-access-z22b7\") pod \"09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb\" (UID: \"09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb\") " Feb 16 11:35:49 crc kubenswrapper[4949]: I0216 11:35:49.001605 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb-ovsdbserver-nb\") pod \"09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb\" (UID: \"09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb\") " Feb 16 11:35:49 crc kubenswrapper[4949]: I0216 11:35:49.001724 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb-openstack-edpm-ipam\") pod \"09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb\" (UID: \"09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb\") " Feb 16 11:35:49 crc kubenswrapper[4949]: I0216 11:35:49.001813 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb-dns-swift-storage-0\") pod \"09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb\" (UID: \"09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb\") " Feb 16 11:35:49 crc kubenswrapper[4949]: I0216 11:35:49.001949 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb-ovsdbserver-sb\") pod \"09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb\" (UID: \"09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb\") " Feb 16 11:35:49 crc kubenswrapper[4949]: I0216 11:35:49.002063 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb-dns-svc\") pod \"09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb\" (UID: \"09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb\") " Feb 16 11:35:49 crc kubenswrapper[4949]: I0216 11:35:49.002225 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb-config\") pod \"09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb\" (UID: \"09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb\") " Feb 16 11:35:49 crc kubenswrapper[4949]: I0216 11:35:49.014623 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb-kube-api-access-z22b7" (OuterVolumeSpecName: "kube-api-access-z22b7") pod "09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb" (UID: "09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb"). InnerVolumeSpecName "kube-api-access-z22b7". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:35:49 crc kubenswrapper[4949]: I0216 11:35:49.077324 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb" (UID: "09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:35:49 crc kubenswrapper[4949]: I0216 11:35:49.081385 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb" (UID: "09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:35:49 crc kubenswrapper[4949]: I0216 11:35:49.090785 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb" (UID: "09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:35:49 crc kubenswrapper[4949]: I0216 11:35:49.094485 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb-config" (OuterVolumeSpecName: "config") pod "09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb" (UID: "09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:35:49 crc kubenswrapper[4949]: I0216 11:35:49.111921 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z22b7\" (UniqueName: \"kubernetes.io/projected/09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb-kube-api-access-z22b7\") on node \"crc\" DevicePath \"\"" Feb 16 11:35:49 crc kubenswrapper[4949]: I0216 11:35:49.111967 4949 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Feb 16 11:35:49 crc kubenswrapper[4949]: I0216 11:35:49.111977 4949 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Feb 16 11:35:49 crc kubenswrapper[4949]: I0216 11:35:49.111985 4949 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Feb 16 11:35:49 crc kubenswrapper[4949]: I0216 11:35:49.111994 4949 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb-config\") on node \"crc\" DevicePath \"\"" Feb 16 11:35:49 crc kubenswrapper[4949]: I0216 11:35:49.119528 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb" (UID: "09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:35:49 crc kubenswrapper[4949]: I0216 11:35:49.126987 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb" (UID: "09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:35:49 crc kubenswrapper[4949]: I0216 11:35:49.214467 4949 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Feb 16 11:35:49 crc kubenswrapper[4949]: I0216 11:35:49.214519 4949 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 16 11:35:49 crc kubenswrapper[4949]: I0216 11:35:49.669509 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68df85789f-bhwkl" event={"ID":"09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb","Type":"ContainerDied","Data":"aa7f4cbaba229c6f622cac45339c06082e9df6506eb2328c72ffd64e25b9b088"} Feb 16 11:35:49 crc kubenswrapper[4949]: I0216 11:35:49.669964 4949 scope.go:117] "RemoveContainer" containerID="01a7a6393fc686b9b7f70c486ea4e942e950aa0da1cfc528ff493eef314e4238" Feb 16 11:35:49 crc kubenswrapper[4949]: I0216 11:35:49.669798 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68df85789f-bhwkl" Feb 16 11:35:49 crc kubenswrapper[4949]: I0216 11:35:49.703740 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-68df85789f-bhwkl"] Feb 16 11:35:49 crc kubenswrapper[4949]: I0216 11:35:49.717580 4949 scope.go:117] "RemoveContainer" containerID="10766010cab785a44b45bd450eed66e091de03d9b31958643f304d9e9d741bd1" Feb 16 11:35:49 crc kubenswrapper[4949]: I0216 11:35:49.720030 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-68df85789f-bhwkl"] Feb 16 11:35:51 crc kubenswrapper[4949]: E0216 11:35:51.249782 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:35:51 crc kubenswrapper[4949]: I0216 11:35:51.250426 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb" path="/var/lib/kubelet/pods/09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb/volumes" Feb 16 11:35:53 crc kubenswrapper[4949]: E0216 11:35:53.386124 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 16 11:35:53 crc kubenswrapper[4949]: E0216 11:35:53.386468 4949 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 16 11:35:53 crc kubenswrapper[4949]: E0216 11:35:53.386613 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ksbml,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-5lgds_openstack(a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 16 11:35:53 crc kubenswrapper[4949]: E0216 11:35:53.387794 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:35:54 crc kubenswrapper[4949]: I0216 11:35:54.547561 4949 scope.go:117] "RemoveContainer" containerID="c9a4b5d8aa15e598d9c2f85ea3b85e9f0970f9c71a53e01262982fa1a7449218" Feb 16 11:35:54 crc kubenswrapper[4949]: I0216 11:35:54.590141 4949 scope.go:117] "RemoveContainer" containerID="b1586a2ebe8bc4c77bd82b8125c5374baef9f2d70420c2ecdee21fb3eb775a3b" Feb 16 11:35:54 crc kubenswrapper[4949]: I0216 11:35:54.630657 4949 scope.go:117] "RemoveContainer" containerID="8a744026cd1e3a9a8a75a94fced472e587930e54ef454c1938f4129de226cd1c" Feb 16 11:35:54 crc kubenswrapper[4949]: I0216 11:35:54.681513 4949 scope.go:117] "RemoveContainer" containerID="cd2623268180de8b4383b823158db3c698c76e8cfb0120e6e2ea8cd841746ae4" Feb 16 11:35:54 crc kubenswrapper[4949]: I0216 11:35:54.731214 4949 scope.go:117] "RemoveContainer" containerID="ddb331e8524da19fc18eb470ff5dd75b4ca4898845ffdaef8d31c8d24ec537a8" Feb 16 11:35:54 crc kubenswrapper[4949]: I0216 11:35:54.761574 4949 scope.go:117] "RemoveContainer" containerID="f33caa37b2130d80ccd20ffb48d1c41c74fb07d9a68e196bd134751c2fcb1e00" Feb 16 11:35:54 crc kubenswrapper[4949]: I0216 11:35:54.825556 4949 scope.go:117] "RemoveContainer" containerID="8777d8e1b0069e26d970e9341adf3c6b26e914131602291d755d47675b7fa4ff" Feb 16 11:35:54 crc kubenswrapper[4949]: I0216 11:35:54.876830 4949 scope.go:117] "RemoveContainer" containerID="9e8b3602a501feea39bba77d0760d9ccd52744e0b128acc546f39714fdffa8eb" Feb 16 11:35:55 crc kubenswrapper[4949]: I0216 11:35:55.236095 4949 scope.go:117] "RemoveContainer" containerID="fcc25fdabb7245fb2b21fe157daaa0479c92c3c1e231adb091c93ee7fe6b8437" Feb 16 11:35:55 crc kubenswrapper[4949]: E0216 11:35:55.236752 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:36:01 crc kubenswrapper[4949]: I0216 11:36:01.925473 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-g8nbv"] Feb 16 11:36:01 crc kubenswrapper[4949]: E0216 11:36:01.926632 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0fc7610-06f6-47af-b194-113413f5b260" containerName="init" Feb 16 11:36:01 crc kubenswrapper[4949]: I0216 11:36:01.926650 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0fc7610-06f6-47af-b194-113413f5b260" containerName="init" Feb 16 11:36:01 crc kubenswrapper[4949]: E0216 11:36:01.926682 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb" containerName="dnsmasq-dns" Feb 16 11:36:01 crc kubenswrapper[4949]: I0216 11:36:01.926691 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb" containerName="dnsmasq-dns" Feb 16 11:36:01 crc kubenswrapper[4949]: E0216 11:36:01.926708 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0fc7610-06f6-47af-b194-113413f5b260" containerName="dnsmasq-dns" Feb 16 11:36:01 crc kubenswrapper[4949]: I0216 11:36:01.926715 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0fc7610-06f6-47af-b194-113413f5b260" 
containerName="dnsmasq-dns" Feb 16 11:36:01 crc kubenswrapper[4949]: E0216 11:36:01.926740 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb" containerName="init" Feb 16 11:36:01 crc kubenswrapper[4949]: I0216 11:36:01.926746 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb" containerName="init" Feb 16 11:36:01 crc kubenswrapper[4949]: I0216 11:36:01.927119 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="09d30ee5-4073-4e9e-9a7e-db3c8bfb76eb" containerName="dnsmasq-dns" Feb 16 11:36:01 crc kubenswrapper[4949]: I0216 11:36:01.927134 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0fc7610-06f6-47af-b194-113413f5b260" containerName="dnsmasq-dns" Feb 16 11:36:01 crc kubenswrapper[4949]: I0216 11:36:01.928128 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-g8nbv" Feb 16 11:36:01 crc kubenswrapper[4949]: I0216 11:36:01.933030 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Feb 16 11:36:01 crc kubenswrapper[4949]: I0216 11:36:01.933124 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Feb 16 11:36:01 crc kubenswrapper[4949]: I0216 11:36:01.933284 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-g89vn" Feb 16 11:36:01 crc kubenswrapper[4949]: I0216 11:36:01.933338 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Feb 16 11:36:01 crc kubenswrapper[4949]: I0216 11:36:01.943696 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-g8nbv"] Feb 16 11:36:02 crc kubenswrapper[4949]: I0216 11:36:02.079587 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a-ssh-key-openstack-edpm-ipam\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-g8nbv\" (UID: \"86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-g8nbv" Feb 16 11:36:02 crc kubenswrapper[4949]: I0216 11:36:02.079674 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gtdnk\" (UniqueName: \"kubernetes.io/projected/86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a-kube-api-access-gtdnk\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-g8nbv\" (UID: \"86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-g8nbv" Feb 16 11:36:02 crc kubenswrapper[4949]: I0216 11:36:02.080938 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-g8nbv\" (UID: \"86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-g8nbv" Feb 16 11:36:02 crc kubenswrapper[4949]: I0216 11:36:02.081095 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-g8nbv\" (UID: \"86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-g8nbv" Feb 16 11:36:02 crc kubenswrapper[4949]: I0216 11:36:02.183596 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a-ssh-key-openstack-edpm-ipam\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-g8nbv\" (UID: \"86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-g8nbv" Feb 16 11:36:02 crc kubenswrapper[4949]: I0216 11:36:02.183796 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gtdnk\" (UniqueName: \"kubernetes.io/projected/86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a-kube-api-access-gtdnk\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-g8nbv\" (UID: \"86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-g8nbv" Feb 16 11:36:02 crc kubenswrapper[4949]: I0216 11:36:02.184272 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-g8nbv\" (UID: \"86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-g8nbv" Feb 16 11:36:02 crc kubenswrapper[4949]: I0216 11:36:02.184483 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-g8nbv\" (UID: \"86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-g8nbv" Feb 16 11:36:02 crc kubenswrapper[4949]: I0216 11:36:02.194075 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-g8nbv\" (UID: \"86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-g8nbv" Feb 16 11:36:02 crc kubenswrapper[4949]: I0216 11:36:02.194116 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-g8nbv\" (UID: \"86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-g8nbv" Feb 16 11:36:02 crc kubenswrapper[4949]: I0216 11:36:02.194270 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a-ssh-key-openstack-edpm-ipam\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-g8nbv\" (UID: \"86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-g8nbv" Feb 16 11:36:02 crc kubenswrapper[4949]: I0216 11:36:02.203609 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gtdnk\" (UniqueName: 
\"kubernetes.io/projected/86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a-kube-api-access-gtdnk\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-g8nbv\" (UID: \"86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-g8nbv" Feb 16 11:36:02 crc kubenswrapper[4949]: I0216 11:36:02.254262 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-g8nbv" Feb 16 11:36:02 crc kubenswrapper[4949]: E0216 11:36:02.361754 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 16 11:36:02 crc kubenswrapper[4949]: E0216 11:36:02.361821 4949 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 16 11:36:02 crc kubenswrapper[4949]: E0216 11:36:02.361957 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n674h5dh7bh65bhcch65chc4h547h5d4h5c7h5dch5c8h74hb9h5f4hd8h79h7h59bh559h56bh9bhbch67bh68bh575h5cbh658h5bch7bhcch5d9q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8k7p7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 
/var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(c69d7379-6f2b-45ae-8972-71e223a337a8): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 16 11:36:02 crc kubenswrapper[4949]: E0216 11:36:02.363194 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:36:02 crc kubenswrapper[4949]: I0216 11:36:02.841847 4949 generic.go:334] "Generic (PLEG): container finished" podID="701d055f-9bdd-4661-94ac-d8e04866c31f" containerID="3ff9f90f525774fe3780afea7afb287b758873d939b997e44c3a79ce1e13aca6" exitCode=0 Feb 16 11:36:02 crc kubenswrapper[4949]: I0216 11:36:02.841921 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"701d055f-9bdd-4661-94ac-d8e04866c31f","Type":"ContainerDied","Data":"3ff9f90f525774fe3780afea7afb287b758873d939b997e44c3a79ce1e13aca6"} Feb 16 11:36:02 crc kubenswrapper[4949]: I0216 11:36:02.847586 4949 generic.go:334] "Generic (PLEG): container finished" podID="a059bd07-34ae-4e84-8ffd-19eb56597b33" containerID="5f9f939393d6aa3c1fbd961f8241f6f5fdf210bdf0827a4722494776fe97647b" exitCode=0 Feb 16 11:36:02 crc kubenswrapper[4949]: I0216 11:36:02.847678 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"a059bd07-34ae-4e84-8ffd-19eb56597b33","Type":"ContainerDied","Data":"5f9f939393d6aa3c1fbd961f8241f6f5fdf210bdf0827a4722494776fe97647b"} Feb 16 11:36:02 crc kubenswrapper[4949]: I0216 11:36:02.981661 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-g8nbv"] Feb 16 11:36:03 crc kubenswrapper[4949]: I0216 11:36:03.862209 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-g8nbv" event={"ID":"86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a","Type":"ContainerStarted","Data":"8540e920c72f870564662bcbfdbec5d97af0db8bbf6990f6f9e5ed379fade456"} Feb 16 11:36:03 crc kubenswrapper[4949]: I0216 
11:36:03.865093 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"a059bd07-34ae-4e84-8ffd-19eb56597b33","Type":"ContainerStarted","Data":"e57c07be3e70eea2ecd9952398db395d14ed48a67469c5efcd00188ebe9a254b"} Feb 16 11:36:03 crc kubenswrapper[4949]: I0216 11:36:03.865431 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:36:03 crc kubenswrapper[4949]: I0216 11:36:03.874760 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"701d055f-9bdd-4661-94ac-d8e04866c31f","Type":"ContainerStarted","Data":"425eb89d443810f9e005f44f87c7f0044ed344a64a9113745be2c1438dab051a"} Feb 16 11:36:03 crc kubenswrapper[4949]: I0216 11:36:03.876087 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-2" Feb 16 11:36:03 crc kubenswrapper[4949]: I0216 11:36:03.900540 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=37.900517181 podStartE2EDuration="37.900517181s" podCreationTimestamp="2026-02-16 11:35:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:36:03.886811948 +0000 UTC m=+1753.516146133" watchObservedRunningTime="2026-02-16 11:36:03.900517181 +0000 UTC m=+1753.529851346" Feb 16 11:36:03 crc kubenswrapper[4949]: I0216 11:36:03.932754 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-2" podStartSLOduration=37.932733675 podStartE2EDuration="37.932733675s" podCreationTimestamp="2026-02-16 11:35:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:36:03.916903701 +0000 UTC m=+1753.546237886" watchObservedRunningTime="2026-02-16 11:36:03.932733675 +0000 UTC m=+1753.562067840" Feb 16 11:36:06 crc kubenswrapper[4949]: I0216 11:36:06.235994 4949 scope.go:117] "RemoveContainer" containerID="fcc25fdabb7245fb2b21fe157daaa0479c92c3c1e231adb091c93ee7fe6b8437" Feb 16 11:36:06 crc kubenswrapper[4949]: E0216 11:36:06.237872 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:36:07 crc kubenswrapper[4949]: E0216 11:36:07.238818 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:36:15 crc kubenswrapper[4949]: I0216 11:36:15.020097 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-g8nbv" event={"ID":"86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a","Type":"ContainerStarted","Data":"6e060c31c0d63ae82da1c50e8460a1647e71ea7e814ac2f4cca352b988cdf5d4"} Feb 16 11:36:15 crc kubenswrapper[4949]: I0216 11:36:15.056066 4949 pod_startup_latency_tracker.go:104] "Observed 
pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-g8nbv" podStartSLOduration=2.693529507 podStartE2EDuration="14.056034935s" podCreationTimestamp="2026-02-16 11:36:01 +0000 UTC" firstStartedPulling="2026-02-16 11:36:02.923529336 +0000 UTC m=+1752.552863501" lastFinishedPulling="2026-02-16 11:36:14.286034754 +0000 UTC m=+1763.915368929" observedRunningTime="2026-02-16 11:36:15.047465849 +0000 UTC m=+1764.676800024" watchObservedRunningTime="2026-02-16 11:36:15.056034935 +0000 UTC m=+1764.685369110" Feb 16 11:36:16 crc kubenswrapper[4949]: E0216 11:36:16.239565 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:36:16 crc kubenswrapper[4949]: I0216 11:36:16.941522 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Feb 16 11:36:17 crc kubenswrapper[4949]: I0216 11:36:17.188482 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-2" Feb 16 11:36:17 crc kubenswrapper[4949]: I0216 11:36:17.254453 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-1"] Feb 16 11:36:19 crc kubenswrapper[4949]: I0216 11:36:19.235665 4949 scope.go:117] "RemoveContainer" containerID="fcc25fdabb7245fb2b21fe157daaa0479c92c3c1e231adb091c93ee7fe6b8437" Feb 16 11:36:19 crc kubenswrapper[4949]: E0216 11:36:19.236354 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:36:19 crc kubenswrapper[4949]: E0216 11:36:19.239507 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:36:22 crc kubenswrapper[4949]: I0216 11:36:22.299509 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-1" podUID="2fd90353-44d0-4269-84cc-f90c10eb6da4" containerName="rabbitmq" containerID="cri-o://7474434149d49ef0e5a3211b6680db0224f03af056ef4ae8e7648282e53a4e3f" gracePeriod=604795 Feb 16 11:36:25 crc kubenswrapper[4949]: I0216 11:36:25.008263 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-lk6vt"] Feb 16 11:36:25 crc kubenswrapper[4949]: I0216 11:36:25.014900 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lk6vt" Feb 16 11:36:25 crc kubenswrapper[4949]: I0216 11:36:25.029920 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lk6vt"] Feb 16 11:36:25 crc kubenswrapper[4949]: I0216 11:36:25.110036 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qj92l\" (UniqueName: \"kubernetes.io/projected/deeafd19-11a8-449b-9ff0-c2f177ceb4b0-kube-api-access-qj92l\") pod \"redhat-marketplace-lk6vt\" (UID: \"deeafd19-11a8-449b-9ff0-c2f177ceb4b0\") " pod="openshift-marketplace/redhat-marketplace-lk6vt" Feb 16 11:36:25 crc kubenswrapper[4949]: I0216 11:36:25.110510 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/deeafd19-11a8-449b-9ff0-c2f177ceb4b0-catalog-content\") pod \"redhat-marketplace-lk6vt\" (UID: \"deeafd19-11a8-449b-9ff0-c2f177ceb4b0\") " pod="openshift-marketplace/redhat-marketplace-lk6vt" Feb 16 11:36:25 crc kubenswrapper[4949]: I0216 11:36:25.110804 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/deeafd19-11a8-449b-9ff0-c2f177ceb4b0-utilities\") pod \"redhat-marketplace-lk6vt\" (UID: \"deeafd19-11a8-449b-9ff0-c2f177ceb4b0\") " pod="openshift-marketplace/redhat-marketplace-lk6vt" Feb 16 11:36:25 crc kubenswrapper[4949]: I0216 11:36:25.213011 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/deeafd19-11a8-449b-9ff0-c2f177ceb4b0-catalog-content\") pod \"redhat-marketplace-lk6vt\" (UID: \"deeafd19-11a8-449b-9ff0-c2f177ceb4b0\") " pod="openshift-marketplace/redhat-marketplace-lk6vt" Feb 16 11:36:25 crc kubenswrapper[4949]: I0216 11:36:25.213391 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/deeafd19-11a8-449b-9ff0-c2f177ceb4b0-utilities\") pod \"redhat-marketplace-lk6vt\" (UID: \"deeafd19-11a8-449b-9ff0-c2f177ceb4b0\") " pod="openshift-marketplace/redhat-marketplace-lk6vt" Feb 16 11:36:25 crc kubenswrapper[4949]: I0216 11:36:25.213508 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qj92l\" (UniqueName: \"kubernetes.io/projected/deeafd19-11a8-449b-9ff0-c2f177ceb4b0-kube-api-access-qj92l\") pod \"redhat-marketplace-lk6vt\" (UID: \"deeafd19-11a8-449b-9ff0-c2f177ceb4b0\") " pod="openshift-marketplace/redhat-marketplace-lk6vt" Feb 16 11:36:25 crc kubenswrapper[4949]: I0216 11:36:25.213595 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/deeafd19-11a8-449b-9ff0-c2f177ceb4b0-catalog-content\") pod \"redhat-marketplace-lk6vt\" (UID: \"deeafd19-11a8-449b-9ff0-c2f177ceb4b0\") " pod="openshift-marketplace/redhat-marketplace-lk6vt" Feb 16 11:36:25 crc kubenswrapper[4949]: I0216 11:36:25.213879 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/deeafd19-11a8-449b-9ff0-c2f177ceb4b0-utilities\") pod \"redhat-marketplace-lk6vt\" (UID: \"deeafd19-11a8-449b-9ff0-c2f177ceb4b0\") " pod="openshift-marketplace/redhat-marketplace-lk6vt" Feb 16 11:36:25 crc kubenswrapper[4949]: I0216 11:36:25.244144 4949 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-qj92l\" (UniqueName: \"kubernetes.io/projected/deeafd19-11a8-449b-9ff0-c2f177ceb4b0-kube-api-access-qj92l\") pod \"redhat-marketplace-lk6vt\" (UID: \"deeafd19-11a8-449b-9ff0-c2f177ceb4b0\") " pod="openshift-marketplace/redhat-marketplace-lk6vt" Feb 16 11:36:25 crc kubenswrapper[4949]: I0216 11:36:25.356783 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lk6vt" Feb 16 11:36:25 crc kubenswrapper[4949]: I0216 11:36:25.917422 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lk6vt"] Feb 16 11:36:25 crc kubenswrapper[4949]: W0216 11:36:25.918032 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddeeafd19_11a8_449b_9ff0_c2f177ceb4b0.slice/crio-3b1801480956b9f6e8f2cc636f034ed5383eab2f36b3df82af3c8d5fa8ae2464 WatchSource:0}: Error finding container 3b1801480956b9f6e8f2cc636f034ed5383eab2f36b3df82af3c8d5fa8ae2464: Status 404 returned error can't find the container with id 3b1801480956b9f6e8f2cc636f034ed5383eab2f36b3df82af3c8d5fa8ae2464 Feb 16 11:36:26 crc kubenswrapper[4949]: I0216 11:36:26.177055 4949 generic.go:334] "Generic (PLEG): container finished" podID="86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a" containerID="6e060c31c0d63ae82da1c50e8460a1647e71ea7e814ac2f4cca352b988cdf5d4" exitCode=0 Feb 16 11:36:26 crc kubenswrapper[4949]: I0216 11:36:26.177129 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-g8nbv" event={"ID":"86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a","Type":"ContainerDied","Data":"6e060c31c0d63ae82da1c50e8460a1647e71ea7e814ac2f4cca352b988cdf5d4"} Feb 16 11:36:26 crc kubenswrapper[4949]: I0216 11:36:26.183817 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lk6vt" event={"ID":"deeafd19-11a8-449b-9ff0-c2f177ceb4b0","Type":"ContainerStarted","Data":"d2ea3bc92f6fab80ae6f2c6524bcdafa88f2cb8934887ea4e61543cd38986342"} Feb 16 11:36:26 crc kubenswrapper[4949]: I0216 11:36:26.183874 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lk6vt" event={"ID":"deeafd19-11a8-449b-9ff0-c2f177ceb4b0","Type":"ContainerStarted","Data":"3b1801480956b9f6e8f2cc636f034ed5383eab2f36b3df82af3c8d5fa8ae2464"} Feb 16 11:36:27 crc kubenswrapper[4949]: I0216 11:36:27.207070 4949 generic.go:334] "Generic (PLEG): container finished" podID="deeafd19-11a8-449b-9ff0-c2f177ceb4b0" containerID="d2ea3bc92f6fab80ae6f2c6524bcdafa88f2cb8934887ea4e61543cd38986342" exitCode=0 Feb 16 11:36:27 crc kubenswrapper[4949]: I0216 11:36:27.207145 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lk6vt" event={"ID":"deeafd19-11a8-449b-9ff0-c2f177ceb4b0","Type":"ContainerDied","Data":"d2ea3bc92f6fab80ae6f2c6524bcdafa88f2cb8934887ea4e61543cd38986342"} Feb 16 11:36:27 crc kubenswrapper[4949]: I0216 11:36:27.757743 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-g8nbv" Feb 16 11:36:27 crc kubenswrapper[4949]: I0216 11:36:27.882162 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a-ssh-key-openstack-edpm-ipam\") pod \"86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a\" (UID: \"86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a\") " Feb 16 11:36:27 crc kubenswrapper[4949]: I0216 11:36:27.882918 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a-repo-setup-combined-ca-bundle\") pod \"86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a\" (UID: \"86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a\") " Feb 16 11:36:27 crc kubenswrapper[4949]: I0216 11:36:27.883400 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a-inventory\") pod \"86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a\" (UID: \"86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a\") " Feb 16 11:36:27 crc kubenswrapper[4949]: I0216 11:36:27.883634 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gtdnk\" (UniqueName: \"kubernetes.io/projected/86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a-kube-api-access-gtdnk\") pod \"86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a\" (UID: \"86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a\") " Feb 16 11:36:27 crc kubenswrapper[4949]: I0216 11:36:27.896340 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a" (UID: "86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:36:27 crc kubenswrapper[4949]: I0216 11:36:27.897255 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a-kube-api-access-gtdnk" (OuterVolumeSpecName: "kube-api-access-gtdnk") pod "86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a" (UID: "86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a"). InnerVolumeSpecName "kube-api-access-gtdnk". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:36:27 crc kubenswrapper[4949]: I0216 11:36:27.940428 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a" (UID: "86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:36:27 crc kubenswrapper[4949]: I0216 11:36:27.946320 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a-inventory" (OuterVolumeSpecName: "inventory") pod "86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a" (UID: "86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:36:27 crc kubenswrapper[4949]: I0216 11:36:27.997388 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gtdnk\" (UniqueName: \"kubernetes.io/projected/86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a-kube-api-access-gtdnk\") on node \"crc\" DevicePath \"\"" Feb 16 11:36:27 crc kubenswrapper[4949]: I0216 11:36:27.997446 4949 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Feb 16 11:36:27 crc kubenswrapper[4949]: I0216 11:36:27.997457 4949 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:36:27 crc kubenswrapper[4949]: I0216 11:36:27.997468 4949 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a-inventory\") on node \"crc\" DevicePath \"\"" Feb 16 11:36:28 crc kubenswrapper[4949]: I0216 11:36:28.224461 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-g8nbv" event={"ID":"86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a","Type":"ContainerDied","Data":"8540e920c72f870564662bcbfdbec5d97af0db8bbf6990f6f9e5ed379fade456"} Feb 16 11:36:28 crc kubenswrapper[4949]: I0216 11:36:28.224751 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8540e920c72f870564662bcbfdbec5d97af0db8bbf6990f6f9e5ed379fade456" Feb 16 11:36:28 crc kubenswrapper[4949]: I0216 11:36:28.224553 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-g8nbv" Feb 16 11:36:28 crc kubenswrapper[4949]: I0216 11:36:28.316806 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-f25z6"] Feb 16 11:36:28 crc kubenswrapper[4949]: E0216 11:36:28.317379 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Feb 16 11:36:28 crc kubenswrapper[4949]: I0216 11:36:28.317398 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Feb 16 11:36:28 crc kubenswrapper[4949]: I0216 11:36:28.317674 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Feb 16 11:36:28 crc kubenswrapper[4949]: I0216 11:36:28.318610 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-f25z6" Feb 16 11:36:28 crc kubenswrapper[4949]: I0216 11:36:28.322954 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Feb 16 11:36:28 crc kubenswrapper[4949]: I0216 11:36:28.329865 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-f25z6"] Feb 16 11:36:28 crc kubenswrapper[4949]: I0216 11:36:28.330343 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Feb 16 11:36:28 crc kubenswrapper[4949]: I0216 11:36:28.330527 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Feb 16 11:36:28 crc kubenswrapper[4949]: I0216 11:36:28.331081 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-g89vn" Feb 16 11:36:28 crc kubenswrapper[4949]: I0216 11:36:28.512411 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/8bd581a9-2646-4045-bfb1-a0a4e356936d-ssh-key-openstack-edpm-ipam\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-f25z6\" (UID: \"8bd581a9-2646-4045-bfb1-a0a4e356936d\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-f25z6" Feb 16 11:36:28 crc kubenswrapper[4949]: I0216 11:36:28.512518 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8bd581a9-2646-4045-bfb1-a0a4e356936d-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-f25z6\" (UID: \"8bd581a9-2646-4045-bfb1-a0a4e356936d\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-f25z6" Feb 16 11:36:28 crc kubenswrapper[4949]: I0216 11:36:28.512617 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-glr48\" (UniqueName: \"kubernetes.io/projected/8bd581a9-2646-4045-bfb1-a0a4e356936d-kube-api-access-glr48\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-f25z6\" (UID: \"8bd581a9-2646-4045-bfb1-a0a4e356936d\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-f25z6" Feb 16 11:36:28 crc kubenswrapper[4949]: I0216 11:36:28.615678 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/8bd581a9-2646-4045-bfb1-a0a4e356936d-ssh-key-openstack-edpm-ipam\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-f25z6\" (UID: \"8bd581a9-2646-4045-bfb1-a0a4e356936d\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-f25z6" Feb 16 11:36:28 crc kubenswrapper[4949]: I0216 11:36:28.615812 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8bd581a9-2646-4045-bfb1-a0a4e356936d-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-f25z6\" (UID: \"8bd581a9-2646-4045-bfb1-a0a4e356936d\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-f25z6" Feb 16 11:36:28 crc kubenswrapper[4949]: I0216 11:36:28.615892 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-glr48\" (UniqueName: \"kubernetes.io/projected/8bd581a9-2646-4045-bfb1-a0a4e356936d-kube-api-access-glr48\") pod 
\"redhat-edpm-deployment-openstack-edpm-ipam-f25z6\" (UID: \"8bd581a9-2646-4045-bfb1-a0a4e356936d\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-f25z6" Feb 16 11:36:28 crc kubenswrapper[4949]: I0216 11:36:28.622571 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/8bd581a9-2646-4045-bfb1-a0a4e356936d-ssh-key-openstack-edpm-ipam\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-f25z6\" (UID: \"8bd581a9-2646-4045-bfb1-a0a4e356936d\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-f25z6" Feb 16 11:36:28 crc kubenswrapper[4949]: I0216 11:36:28.622829 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8bd581a9-2646-4045-bfb1-a0a4e356936d-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-f25z6\" (UID: \"8bd581a9-2646-4045-bfb1-a0a4e356936d\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-f25z6" Feb 16 11:36:28 crc kubenswrapper[4949]: I0216 11:36:28.638085 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-glr48\" (UniqueName: \"kubernetes.io/projected/8bd581a9-2646-4045-bfb1-a0a4e356936d-kube-api-access-glr48\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-f25z6\" (UID: \"8bd581a9-2646-4045-bfb1-a0a4e356936d\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-f25z6" Feb 16 11:36:28 crc kubenswrapper[4949]: I0216 11:36:28.647305 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-f25z6" Feb 16 11:36:28 crc kubenswrapper[4949]: I0216 11:36:28.916377 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-1" Feb 16 11:36:29 crc kubenswrapper[4949]: I0216 11:36:29.027676 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/2fd90353-44d0-4269-84cc-f90c10eb6da4-rabbitmq-confd\") pod \"2fd90353-44d0-4269-84cc-f90c10eb6da4\" (UID: \"2fd90353-44d0-4269-84cc-f90c10eb6da4\") " Feb 16 11:36:29 crc kubenswrapper[4949]: I0216 11:36:29.028257 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5a5823a0-ef3a-488c-abcb-c6baba529771\") pod \"2fd90353-44d0-4269-84cc-f90c10eb6da4\" (UID: \"2fd90353-44d0-4269-84cc-f90c10eb6da4\") " Feb 16 11:36:29 crc kubenswrapper[4949]: I0216 11:36:29.028367 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/2fd90353-44d0-4269-84cc-f90c10eb6da4-rabbitmq-tls\") pod \"2fd90353-44d0-4269-84cc-f90c10eb6da4\" (UID: \"2fd90353-44d0-4269-84cc-f90c10eb6da4\") " Feb 16 11:36:29 crc kubenswrapper[4949]: I0216 11:36:29.028403 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/2fd90353-44d0-4269-84cc-f90c10eb6da4-pod-info\") pod \"2fd90353-44d0-4269-84cc-f90c10eb6da4\" (UID: \"2fd90353-44d0-4269-84cc-f90c10eb6da4\") " Feb 16 11:36:29 crc kubenswrapper[4949]: I0216 11:36:29.028432 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/2fd90353-44d0-4269-84cc-f90c10eb6da4-rabbitmq-plugins\") pod \"2fd90353-44d0-4269-84cc-f90c10eb6da4\" (UID: \"2fd90353-44d0-4269-84cc-f90c10eb6da4\") " Feb 16 11:36:29 crc kubenswrapper[4949]: I0216 11:36:29.028484 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/2fd90353-44d0-4269-84cc-f90c10eb6da4-plugins-conf\") pod \"2fd90353-44d0-4269-84cc-f90c10eb6da4\" (UID: \"2fd90353-44d0-4269-84cc-f90c10eb6da4\") " Feb 16 11:36:29 crc kubenswrapper[4949]: I0216 11:36:29.028569 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/2fd90353-44d0-4269-84cc-f90c10eb6da4-rabbitmq-erlang-cookie\") pod \"2fd90353-44d0-4269-84cc-f90c10eb6da4\" (UID: \"2fd90353-44d0-4269-84cc-f90c10eb6da4\") " Feb 16 11:36:29 crc kubenswrapper[4949]: I0216 11:36:29.028642 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/2fd90353-44d0-4269-84cc-f90c10eb6da4-server-conf\") pod \"2fd90353-44d0-4269-84cc-f90c10eb6da4\" (UID: \"2fd90353-44d0-4269-84cc-f90c10eb6da4\") " Feb 16 11:36:29 crc kubenswrapper[4949]: I0216 11:36:29.028777 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/2fd90353-44d0-4269-84cc-f90c10eb6da4-erlang-cookie-secret\") pod \"2fd90353-44d0-4269-84cc-f90c10eb6da4\" (UID: \"2fd90353-44d0-4269-84cc-f90c10eb6da4\") " Feb 16 11:36:29 crc kubenswrapper[4949]: I0216 11:36:29.028804 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2wzds\" (UniqueName: \"kubernetes.io/projected/2fd90353-44d0-4269-84cc-f90c10eb6da4-kube-api-access-2wzds\") pod 
\"2fd90353-44d0-4269-84cc-f90c10eb6da4\" (UID: \"2fd90353-44d0-4269-84cc-f90c10eb6da4\") " Feb 16 11:36:29 crc kubenswrapper[4949]: I0216 11:36:29.028854 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2fd90353-44d0-4269-84cc-f90c10eb6da4-config-data\") pod \"2fd90353-44d0-4269-84cc-f90c10eb6da4\" (UID: \"2fd90353-44d0-4269-84cc-f90c10eb6da4\") " Feb 16 11:36:29 crc kubenswrapper[4949]: I0216 11:36:29.030402 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2fd90353-44d0-4269-84cc-f90c10eb6da4-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "2fd90353-44d0-4269-84cc-f90c10eb6da4" (UID: "2fd90353-44d0-4269-84cc-f90c10eb6da4"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:36:29 crc kubenswrapper[4949]: I0216 11:36:29.030438 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2fd90353-44d0-4269-84cc-f90c10eb6da4-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "2fd90353-44d0-4269-84cc-f90c10eb6da4" (UID: "2fd90353-44d0-4269-84cc-f90c10eb6da4"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:36:29 crc kubenswrapper[4949]: I0216 11:36:29.031915 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2fd90353-44d0-4269-84cc-f90c10eb6da4-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "2fd90353-44d0-4269-84cc-f90c10eb6da4" (UID: "2fd90353-44d0-4269-84cc-f90c10eb6da4"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:36:29 crc kubenswrapper[4949]: I0216 11:36:29.038249 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2fd90353-44d0-4269-84cc-f90c10eb6da4-kube-api-access-2wzds" (OuterVolumeSpecName: "kube-api-access-2wzds") pod "2fd90353-44d0-4269-84cc-f90c10eb6da4" (UID: "2fd90353-44d0-4269-84cc-f90c10eb6da4"). InnerVolumeSpecName "kube-api-access-2wzds". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:36:29 crc kubenswrapper[4949]: I0216 11:36:29.041740 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2fd90353-44d0-4269-84cc-f90c10eb6da4-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "2fd90353-44d0-4269-84cc-f90c10eb6da4" (UID: "2fd90353-44d0-4269-84cc-f90c10eb6da4"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:36:29 crc kubenswrapper[4949]: I0216 11:36:29.055404 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/2fd90353-44d0-4269-84cc-f90c10eb6da4-pod-info" (OuterVolumeSpecName: "pod-info") pod "2fd90353-44d0-4269-84cc-f90c10eb6da4" (UID: "2fd90353-44d0-4269-84cc-f90c10eb6da4"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Feb 16 11:36:29 crc kubenswrapper[4949]: I0216 11:36:29.066524 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2fd90353-44d0-4269-84cc-f90c10eb6da4-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "2fd90353-44d0-4269-84cc-f90c10eb6da4" (UID: "2fd90353-44d0-4269-84cc-f90c10eb6da4"). InnerVolumeSpecName "erlang-cookie-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:36:29 crc kubenswrapper[4949]: I0216 11:36:29.141613 4949 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/2fd90353-44d0-4269-84cc-f90c10eb6da4-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Feb 16 11:36:29 crc kubenswrapper[4949]: I0216 11:36:29.141645 4949 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/2fd90353-44d0-4269-84cc-f90c10eb6da4-pod-info\") on node \"crc\" DevicePath \"\"" Feb 16 11:36:29 crc kubenswrapper[4949]: I0216 11:36:29.141654 4949 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/2fd90353-44d0-4269-84cc-f90c10eb6da4-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Feb 16 11:36:29 crc kubenswrapper[4949]: I0216 11:36:29.141690 4949 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/2fd90353-44d0-4269-84cc-f90c10eb6da4-plugins-conf\") on node \"crc\" DevicePath \"\"" Feb 16 11:36:29 crc kubenswrapper[4949]: I0216 11:36:29.141700 4949 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/2fd90353-44d0-4269-84cc-f90c10eb6da4-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Feb 16 11:36:29 crc kubenswrapper[4949]: I0216 11:36:29.141710 4949 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/2fd90353-44d0-4269-84cc-f90c10eb6da4-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Feb 16 11:36:29 crc kubenswrapper[4949]: I0216 11:36:29.141723 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2wzds\" (UniqueName: \"kubernetes.io/projected/2fd90353-44d0-4269-84cc-f90c10eb6da4-kube-api-access-2wzds\") on node \"crc\" DevicePath \"\"" Feb 16 11:36:29 crc kubenswrapper[4949]: I0216 11:36:29.163693 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2fd90353-44d0-4269-84cc-f90c10eb6da4-config-data" (OuterVolumeSpecName: "config-data") pod "2fd90353-44d0-4269-84cc-f90c10eb6da4" (UID: "2fd90353-44d0-4269-84cc-f90c10eb6da4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:36:29 crc kubenswrapper[4949]: I0216 11:36:29.234152 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2fd90353-44d0-4269-84cc-f90c10eb6da4-server-conf" (OuterVolumeSpecName: "server-conf") pod "2fd90353-44d0-4269-84cc-f90c10eb6da4" (UID: "2fd90353-44d0-4269-84cc-f90c10eb6da4"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:36:29 crc kubenswrapper[4949]: I0216 11:36:29.249677 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2fd90353-44d0-4269-84cc-f90c10eb6da4-config-data\") on node \"crc\" DevicePath \"\"" Feb 16 11:36:29 crc kubenswrapper[4949]: I0216 11:36:29.249710 4949 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/2fd90353-44d0-4269-84cc-f90c10eb6da4-server-conf\") on node \"crc\" DevicePath \"\"" Feb 16 11:36:29 crc kubenswrapper[4949]: I0216 11:36:29.301596 4949 generic.go:334] "Generic (PLEG): container finished" podID="deeafd19-11a8-449b-9ff0-c2f177ceb4b0" containerID="4db7f47e845dced6fe095a970bd20bb5fd2301fde01d774dba1bdc89a4e7c815" exitCode=0 Feb 16 11:36:29 crc kubenswrapper[4949]: I0216 11:36:29.301722 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lk6vt" event={"ID":"deeafd19-11a8-449b-9ff0-c2f177ceb4b0","Type":"ContainerDied","Data":"4db7f47e845dced6fe095a970bd20bb5fd2301fde01d774dba1bdc89a4e7c815"} Feb 16 11:36:29 crc kubenswrapper[4949]: I0216 11:36:29.339727 4949 generic.go:334] "Generic (PLEG): container finished" podID="2fd90353-44d0-4269-84cc-f90c10eb6da4" containerID="7474434149d49ef0e5a3211b6680db0224f03af056ef4ae8e7648282e53a4e3f" exitCode=0 Feb 16 11:36:29 crc kubenswrapper[4949]: I0216 11:36:29.340070 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"2fd90353-44d0-4269-84cc-f90c10eb6da4","Type":"ContainerDied","Data":"7474434149d49ef0e5a3211b6680db0224f03af056ef4ae8e7648282e53a4e3f"} Feb 16 11:36:29 crc kubenswrapper[4949]: I0216 11:36:29.340119 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"2fd90353-44d0-4269-84cc-f90c10eb6da4","Type":"ContainerDied","Data":"44dab8a1e0b6b38472154706d763c323ef398137fa0702f28119f2592037c06a"} Feb 16 11:36:29 crc kubenswrapper[4949]: I0216 11:36:29.340143 4949 scope.go:117] "RemoveContainer" containerID="7474434149d49ef0e5a3211b6680db0224f03af056ef4ae8e7648282e53a4e3f" Feb 16 11:36:29 crc kubenswrapper[4949]: I0216 11:36:29.340740 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-1" Feb 16 11:36:29 crc kubenswrapper[4949]: E0216 11:36:29.401801 4949 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5a5823a0-ef3a-488c-abcb-c6baba529771 podName:2fd90353-44d0-4269-84cc-f90c10eb6da4 nodeName:}" failed. No retries permitted until 2026-02-16 11:36:29.901740901 +0000 UTC m=+1779.531075076 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "persistence" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5a5823a0-ef3a-488c-abcb-c6baba529771") pod "2fd90353-44d0-4269-84cc-f90c10eb6da4" (UID: "2fd90353-44d0-4269-84cc-f90c10eb6da4") : kubernetes.io/csi: Unmounter.TearDownAt failed: rpc error: code = Unknown desc = check target path: could not get consistent content of /proc/mounts after 3 attempts Feb 16 11:36:29 crc kubenswrapper[4949]: I0216 11:36:29.422758 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2fd90353-44d0-4269-84cc-f90c10eb6da4-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "2fd90353-44d0-4269-84cc-f90c10eb6da4" (UID: "2fd90353-44d0-4269-84cc-f90c10eb6da4"). 
InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:36:29 crc kubenswrapper[4949]: I0216 11:36:29.458405 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-f25z6"] Feb 16 11:36:29 crc kubenswrapper[4949]: I0216 11:36:29.482565 4949 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/2fd90353-44d0-4269-84cc-f90c10eb6da4-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Feb 16 11:36:29 crc kubenswrapper[4949]: I0216 11:36:29.491854 4949 scope.go:117] "RemoveContainer" containerID="73fe0cd43b3472a86306abd4f0a156cdbb5d3c1027b363760eaa3d0147f983b9" Feb 16 11:36:29 crc kubenswrapper[4949]: I0216 11:36:29.517683 4949 scope.go:117] "RemoveContainer" containerID="7474434149d49ef0e5a3211b6680db0224f03af056ef4ae8e7648282e53a4e3f" Feb 16 11:36:29 crc kubenswrapper[4949]: E0216 11:36:29.518189 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7474434149d49ef0e5a3211b6680db0224f03af056ef4ae8e7648282e53a4e3f\": container with ID starting with 7474434149d49ef0e5a3211b6680db0224f03af056ef4ae8e7648282e53a4e3f not found: ID does not exist" containerID="7474434149d49ef0e5a3211b6680db0224f03af056ef4ae8e7648282e53a4e3f" Feb 16 11:36:29 crc kubenswrapper[4949]: I0216 11:36:29.518255 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7474434149d49ef0e5a3211b6680db0224f03af056ef4ae8e7648282e53a4e3f"} err="failed to get container status \"7474434149d49ef0e5a3211b6680db0224f03af056ef4ae8e7648282e53a4e3f\": rpc error: code = NotFound desc = could not find container \"7474434149d49ef0e5a3211b6680db0224f03af056ef4ae8e7648282e53a4e3f\": container with ID starting with 7474434149d49ef0e5a3211b6680db0224f03af056ef4ae8e7648282e53a4e3f not found: ID does not exist" Feb 16 11:36:29 crc kubenswrapper[4949]: I0216 11:36:29.518295 4949 scope.go:117] "RemoveContainer" containerID="73fe0cd43b3472a86306abd4f0a156cdbb5d3c1027b363760eaa3d0147f983b9" Feb 16 11:36:29 crc kubenswrapper[4949]: E0216 11:36:29.518776 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"73fe0cd43b3472a86306abd4f0a156cdbb5d3c1027b363760eaa3d0147f983b9\": container with ID starting with 73fe0cd43b3472a86306abd4f0a156cdbb5d3c1027b363760eaa3d0147f983b9 not found: ID does not exist" containerID="73fe0cd43b3472a86306abd4f0a156cdbb5d3c1027b363760eaa3d0147f983b9" Feb 16 11:36:29 crc kubenswrapper[4949]: I0216 11:36:29.518812 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"73fe0cd43b3472a86306abd4f0a156cdbb5d3c1027b363760eaa3d0147f983b9"} err="failed to get container status \"73fe0cd43b3472a86306abd4f0a156cdbb5d3c1027b363760eaa3d0147f983b9\": rpc error: code = NotFound desc = could not find container \"73fe0cd43b3472a86306abd4f0a156cdbb5d3c1027b363760eaa3d0147f983b9\": container with ID starting with 73fe0cd43b3472a86306abd4f0a156cdbb5d3c1027b363760eaa3d0147f983b9 not found: ID does not exist" Feb 16 11:36:29 crc kubenswrapper[4949]: I0216 11:36:29.994002 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5a5823a0-ef3a-488c-abcb-c6baba529771\") pod \"2fd90353-44d0-4269-84cc-f90c10eb6da4\" (UID: 
\"2fd90353-44d0-4269-84cc-f90c10eb6da4\") " Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.019352 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5a5823a0-ef3a-488c-abcb-c6baba529771" (OuterVolumeSpecName: "persistence") pod "2fd90353-44d0-4269-84cc-f90c10eb6da4" (UID: "2fd90353-44d0-4269-84cc-f90c10eb6da4"). InnerVolumeSpecName "pvc-5a5823a0-ef3a-488c-abcb-c6baba529771". PluginName "kubernetes.io/csi", VolumeGidValue "" Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.098649 4949 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-5a5823a0-ef3a-488c-abcb-c6baba529771\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5a5823a0-ef3a-488c-abcb-c6baba529771\") on node \"crc\" " Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.151611 4949 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.152284 4949 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-5a5823a0-ef3a-488c-abcb-c6baba529771" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5a5823a0-ef3a-488c-abcb-c6baba529771") on node "crc" Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.165053 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-1"] Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.184624 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-1"] Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.197294 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-1"] Feb 16 11:36:30 crc kubenswrapper[4949]: E0216 11:36:30.198132 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fd90353-44d0-4269-84cc-f90c10eb6da4" containerName="setup-container" Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.198225 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fd90353-44d0-4269-84cc-f90c10eb6da4" containerName="setup-container" Feb 16 11:36:30 crc kubenswrapper[4949]: E0216 11:36:30.198294 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fd90353-44d0-4269-84cc-f90c10eb6da4" containerName="rabbitmq" Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.198348 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fd90353-44d0-4269-84cc-f90c10eb6da4" containerName="rabbitmq" Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.198696 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fd90353-44d0-4269-84cc-f90c10eb6da4" containerName="rabbitmq" Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.200144 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-1" Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.202982 4949 reconciler_common.go:293] "Volume detached for volume \"pvc-5a5823a0-ef3a-488c-abcb-c6baba529771\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5a5823a0-ef3a-488c-abcb-c6baba529771\") on node \"crc\" DevicePath \"\"" Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.214888 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-1"] Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.306072 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e0597426-aa70-4dc2-a6dc-c6c2aeea1f27-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-1\" (UID: \"e0597426-aa70-4dc2-a6dc-c6c2aeea1f27\") " pod="openstack/rabbitmq-server-1" Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.306247 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e0597426-aa70-4dc2-a6dc-c6c2aeea1f27-pod-info\") pod \"rabbitmq-server-1\" (UID: \"e0597426-aa70-4dc2-a6dc-c6c2aeea1f27\") " pod="openstack/rabbitmq-server-1" Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.306332 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e0597426-aa70-4dc2-a6dc-c6c2aeea1f27-config-data\") pod \"rabbitmq-server-1\" (UID: \"e0597426-aa70-4dc2-a6dc-c6c2aeea1f27\") " pod="openstack/rabbitmq-server-1" Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.306354 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e0597426-aa70-4dc2-a6dc-c6c2aeea1f27-rabbitmq-plugins\") pod \"rabbitmq-server-1\" (UID: \"e0597426-aa70-4dc2-a6dc-c6c2aeea1f27\") " pod="openstack/rabbitmq-server-1" Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.306434 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/e0597426-aa70-4dc2-a6dc-c6c2aeea1f27-rabbitmq-tls\") pod \"rabbitmq-server-1\" (UID: \"e0597426-aa70-4dc2-a6dc-c6c2aeea1f27\") " pod="openstack/rabbitmq-server-1" Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.306492 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-5a5823a0-ef3a-488c-abcb-c6baba529771\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5a5823a0-ef3a-488c-abcb-c6baba529771\") pod \"rabbitmq-server-1\" (UID: \"e0597426-aa70-4dc2-a6dc-c6c2aeea1f27\") " pod="openstack/rabbitmq-server-1" Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.306552 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-28lkg\" (UniqueName: \"kubernetes.io/projected/e0597426-aa70-4dc2-a6dc-c6c2aeea1f27-kube-api-access-28lkg\") pod \"rabbitmq-server-1\" (UID: \"e0597426-aa70-4dc2-a6dc-c6c2aeea1f27\") " pod="openstack/rabbitmq-server-1" Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.306586 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: 
\"kubernetes.io/secret/e0597426-aa70-4dc2-a6dc-c6c2aeea1f27-erlang-cookie-secret\") pod \"rabbitmq-server-1\" (UID: \"e0597426-aa70-4dc2-a6dc-c6c2aeea1f27\") " pod="openstack/rabbitmq-server-1" Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.306656 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e0597426-aa70-4dc2-a6dc-c6c2aeea1f27-rabbitmq-confd\") pod \"rabbitmq-server-1\" (UID: \"e0597426-aa70-4dc2-a6dc-c6c2aeea1f27\") " pod="openstack/rabbitmq-server-1" Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.306689 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/e0597426-aa70-4dc2-a6dc-c6c2aeea1f27-server-conf\") pod \"rabbitmq-server-1\" (UID: \"e0597426-aa70-4dc2-a6dc-c6c2aeea1f27\") " pod="openstack/rabbitmq-server-1" Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.306772 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e0597426-aa70-4dc2-a6dc-c6c2aeea1f27-plugins-conf\") pod \"rabbitmq-server-1\" (UID: \"e0597426-aa70-4dc2-a6dc-c6c2aeea1f27\") " pod="openstack/rabbitmq-server-1" Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.358292 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-f25z6" event={"ID":"8bd581a9-2646-4045-bfb1-a0a4e356936d","Type":"ContainerStarted","Data":"4a97eb96d66162c3cbec8f5bba2813facd1cf8930a962225e4c0dd3890527a53"} Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.358344 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-f25z6" event={"ID":"8bd581a9-2646-4045-bfb1-a0a4e356936d","Type":"ContainerStarted","Data":"8a62ac2404bef79eaeeb1f0031046944f97621dbb5960ff29249ff6a9e07400f"} Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.361853 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lk6vt" event={"ID":"deeafd19-11a8-449b-9ff0-c2f177ceb4b0","Type":"ContainerStarted","Data":"4ef786e8127932d5833a4db46810b6ad6608cfbb9960a8d4a98009f1382c9b96"} Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.379222 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-f25z6" podStartSLOduration=1.948080838 podStartE2EDuration="2.379200361s" podCreationTimestamp="2026-02-16 11:36:28 +0000 UTC" firstStartedPulling="2026-02-16 11:36:29.43307274 +0000 UTC m=+1779.062406905" lastFinishedPulling="2026-02-16 11:36:29.864192263 +0000 UTC m=+1779.493526428" observedRunningTime="2026-02-16 11:36:30.375540206 +0000 UTC m=+1780.004874371" watchObservedRunningTime="2026-02-16 11:36:30.379200361 +0000 UTC m=+1780.008534526" Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.408499 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e0597426-aa70-4dc2-a6dc-c6c2aeea1f27-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-1\" (UID: \"e0597426-aa70-4dc2-a6dc-c6c2aeea1f27\") " pod="openstack/rabbitmq-server-1" Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.408559 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: 
\"kubernetes.io/downward-api/e0597426-aa70-4dc2-a6dc-c6c2aeea1f27-pod-info\") pod \"rabbitmq-server-1\" (UID: \"e0597426-aa70-4dc2-a6dc-c6c2aeea1f27\") " pod="openstack/rabbitmq-server-1" Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.408605 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e0597426-aa70-4dc2-a6dc-c6c2aeea1f27-config-data\") pod \"rabbitmq-server-1\" (UID: \"e0597426-aa70-4dc2-a6dc-c6c2aeea1f27\") " pod="openstack/rabbitmq-server-1" Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.408620 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e0597426-aa70-4dc2-a6dc-c6c2aeea1f27-rabbitmq-plugins\") pod \"rabbitmq-server-1\" (UID: \"e0597426-aa70-4dc2-a6dc-c6c2aeea1f27\") " pod="openstack/rabbitmq-server-1" Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.408646 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/e0597426-aa70-4dc2-a6dc-c6c2aeea1f27-rabbitmq-tls\") pod \"rabbitmq-server-1\" (UID: \"e0597426-aa70-4dc2-a6dc-c6c2aeea1f27\") " pod="openstack/rabbitmq-server-1" Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.408682 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-5a5823a0-ef3a-488c-abcb-c6baba529771\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5a5823a0-ef3a-488c-abcb-c6baba529771\") pod \"rabbitmq-server-1\" (UID: \"e0597426-aa70-4dc2-a6dc-c6c2aeea1f27\") " pod="openstack/rabbitmq-server-1" Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.409471 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-28lkg\" (UniqueName: \"kubernetes.io/projected/e0597426-aa70-4dc2-a6dc-c6c2aeea1f27-kube-api-access-28lkg\") pod \"rabbitmq-server-1\" (UID: \"e0597426-aa70-4dc2-a6dc-c6c2aeea1f27\") " pod="openstack/rabbitmq-server-1" Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.409500 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e0597426-aa70-4dc2-a6dc-c6c2aeea1f27-erlang-cookie-secret\") pod \"rabbitmq-server-1\" (UID: \"e0597426-aa70-4dc2-a6dc-c6c2aeea1f27\") " pod="openstack/rabbitmq-server-1" Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.409560 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e0597426-aa70-4dc2-a6dc-c6c2aeea1f27-rabbitmq-confd\") pod \"rabbitmq-server-1\" (UID: \"e0597426-aa70-4dc2-a6dc-c6c2aeea1f27\") " pod="openstack/rabbitmq-server-1" Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.409587 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/e0597426-aa70-4dc2-a6dc-c6c2aeea1f27-server-conf\") pod \"rabbitmq-server-1\" (UID: \"e0597426-aa70-4dc2-a6dc-c6c2aeea1f27\") " pod="openstack/rabbitmq-server-1" Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.409748 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e0597426-aa70-4dc2-a6dc-c6c2aeea1f27-plugins-conf\") pod \"rabbitmq-server-1\" (UID: \"e0597426-aa70-4dc2-a6dc-c6c2aeea1f27\") " pod="openstack/rabbitmq-server-1" Feb 16 
11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.410103 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e0597426-aa70-4dc2-a6dc-c6c2aeea1f27-rabbitmq-plugins\") pod \"rabbitmq-server-1\" (UID: \"e0597426-aa70-4dc2-a6dc-c6c2aeea1f27\") " pod="openstack/rabbitmq-server-1" Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.410050 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e0597426-aa70-4dc2-a6dc-c6c2aeea1f27-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-1\" (UID: \"e0597426-aa70-4dc2-a6dc-c6c2aeea1f27\") " pod="openstack/rabbitmq-server-1" Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.410913 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e0597426-aa70-4dc2-a6dc-c6c2aeea1f27-plugins-conf\") pod \"rabbitmq-server-1\" (UID: \"e0597426-aa70-4dc2-a6dc-c6c2aeea1f27\") " pod="openstack/rabbitmq-server-1" Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.410949 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-lk6vt" podStartSLOduration=3.873086526 podStartE2EDuration="6.41089245s" podCreationTimestamp="2026-02-16 11:36:24 +0000 UTC" firstStartedPulling="2026-02-16 11:36:27.210543927 +0000 UTC m=+1776.839878092" lastFinishedPulling="2026-02-16 11:36:29.748349851 +0000 UTC m=+1779.377684016" observedRunningTime="2026-02-16 11:36:30.391970978 +0000 UTC m=+1780.021305143" watchObservedRunningTime="2026-02-16 11:36:30.41089245 +0000 UTC m=+1780.040226615" Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.411598 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/e0597426-aa70-4dc2-a6dc-c6c2aeea1f27-server-conf\") pod \"rabbitmq-server-1\" (UID: \"e0597426-aa70-4dc2-a6dc-c6c2aeea1f27\") " pod="openstack/rabbitmq-server-1" Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.411988 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e0597426-aa70-4dc2-a6dc-c6c2aeea1f27-config-data\") pod \"rabbitmq-server-1\" (UID: \"e0597426-aa70-4dc2-a6dc-c6c2aeea1f27\") " pod="openstack/rabbitmq-server-1" Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.414594 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e0597426-aa70-4dc2-a6dc-c6c2aeea1f27-pod-info\") pod \"rabbitmq-server-1\" (UID: \"e0597426-aa70-4dc2-a6dc-c6c2aeea1f27\") " pod="openstack/rabbitmq-server-1" Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.414807 4949 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
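Note on the pod_startup_latency_tracker entries above: the reported podStartSLOduration is the end-to-end startup time with the image-pull window subtracted, i.e. podStartE2EDuration - (lastFinishedPulling - firstStartedPulling), which is consistent with both tracker entries in this capture. A minimal Python sketch checking that arithmetic against the redhat-marketplace-lk6vt entry just above (timestamps truncated to microseconds, since strptime's %f parses at most six fractional digits):

```python
from datetime import datetime

# Timestamps copied from the pod_startup_latency_tracker entry for
# openshift-marketplace/redhat-marketplace-lk6vt in the log above,
# truncated from nanoseconds to microseconds.
fmt = "%Y-%m-%d %H:%M:%S.%f %z"
created    = datetime.strptime("2026-02-16 11:36:24.000000 +0000", fmt)  # podCreationTimestamp
first_pull = datetime.strptime("2026-02-16 11:36:27.210543 +0000", fmt)  # firstStartedPulling
last_pull  = datetime.strptime("2026-02-16 11:36:29.748349 +0000", fmt)  # lastFinishedPulling
running    = datetime.strptime("2026-02-16 11:36:30.410892 +0000", fmt)  # watchObservedRunningTime

e2e  = (running - created).total_seconds()
pull = (last_pull - first_pull).total_seconds()

print(f"e2e  = {e2e:.6f}s")         # 6.410892 (log: podStartE2EDuration="6.41089245s")
print(f"slo  = {e2e - pull:.6f}s")  # 3.873086 (log: podStartSLOduration=3.873086526)
```

The rabbitmq-server-1 entry later in the capture shows the degenerate case: with no image pulls (both pull timestamps are the zero time), podStartSLOduration equals podStartE2EDuration.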
Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.414851 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e0597426-aa70-4dc2-a6dc-c6c2aeea1f27-erlang-cookie-secret\") pod \"rabbitmq-server-1\" (UID: \"e0597426-aa70-4dc2-a6dc-c6c2aeea1f27\") " pod="openstack/rabbitmq-server-1" Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.414852 4949 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-5a5823a0-ef3a-488c-abcb-c6baba529771\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5a5823a0-ef3a-488c-abcb-c6baba529771\") pod \"rabbitmq-server-1\" (UID: \"e0597426-aa70-4dc2-a6dc-c6c2aeea1f27\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/58db376e1c9e37d07cbb4afde9259fa1e392f418eb7d5b83a633bbc03bd8d1d4/globalmount\"" pod="openstack/rabbitmq-server-1" Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.415863 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e0597426-aa70-4dc2-a6dc-c6c2aeea1f27-rabbitmq-confd\") pod \"rabbitmq-server-1\" (UID: \"e0597426-aa70-4dc2-a6dc-c6c2aeea1f27\") " pod="openstack/rabbitmq-server-1" Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.416680 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/e0597426-aa70-4dc2-a6dc-c6c2aeea1f27-rabbitmq-tls\") pod \"rabbitmq-server-1\" (UID: \"e0597426-aa70-4dc2-a6dc-c6c2aeea1f27\") " pod="openstack/rabbitmq-server-1" Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.432250 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-28lkg\" (UniqueName: \"kubernetes.io/projected/e0597426-aa70-4dc2-a6dc-c6c2aeea1f27-kube-api-access-28lkg\") pod \"rabbitmq-server-1\" (UID: \"e0597426-aa70-4dc2-a6dc-c6c2aeea1f27\") " pod="openstack/rabbitmq-server-1" Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.471339 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-5a5823a0-ef3a-488c-abcb-c6baba529771\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5a5823a0-ef3a-488c-abcb-c6baba529771\") pod \"rabbitmq-server-1\" (UID: \"e0597426-aa70-4dc2-a6dc-c6c2aeea1f27\") " pod="openstack/rabbitmq-server-1" Feb 16 11:36:30 crc kubenswrapper[4949]: I0216 11:36:30.529669 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-1" Feb 16 11:36:31 crc kubenswrapper[4949]: I0216 11:36:31.069617 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-1"] Feb 16 11:36:31 crc kubenswrapper[4949]: E0216 11:36:31.250672 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:36:31 crc kubenswrapper[4949]: I0216 11:36:31.259889 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2fd90353-44d0-4269-84cc-f90c10eb6da4" path="/var/lib/kubelet/pods/2fd90353-44d0-4269-84cc-f90c10eb6da4/volumes" Feb 16 11:36:31 crc kubenswrapper[4949]: I0216 11:36:31.374538 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"e0597426-aa70-4dc2-a6dc-c6c2aeea1f27","Type":"ContainerStarted","Data":"838958bcaf4c8015b0b02e15df028f8cc1e02b87d99f32a29661adfa467bda65"} Feb 16 11:36:33 crc kubenswrapper[4949]: E0216 11:36:33.237468 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:36:33 crc kubenswrapper[4949]: I0216 11:36:33.401696 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"e0597426-aa70-4dc2-a6dc-c6c2aeea1f27","Type":"ContainerStarted","Data":"48286ed4fedb8e8dd3e6be0f1421bbd29679df31a23ab5a95a56cd26d538b412"} Feb 16 11:36:33 crc kubenswrapper[4949]: I0216 11:36:33.404257 4949 generic.go:334] "Generic (PLEG): container finished" podID="8bd581a9-2646-4045-bfb1-a0a4e356936d" containerID="4a97eb96d66162c3cbec8f5bba2813facd1cf8930a962225e4c0dd3890527a53" exitCode=0 Feb 16 11:36:33 crc kubenswrapper[4949]: I0216 11:36:33.404333 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-f25z6" event={"ID":"8bd581a9-2646-4045-bfb1-a0a4e356936d","Type":"ContainerDied","Data":"4a97eb96d66162c3cbec8f5bba2813facd1cf8930a962225e4c0dd3890527a53"} Feb 16 11:36:34 crc kubenswrapper[4949]: I0216 11:36:34.236846 4949 scope.go:117] "RemoveContainer" containerID="fcc25fdabb7245fb2b21fe157daaa0479c92c3c1e231adb091c93ee7fe6b8437" Feb 16 11:36:34 crc kubenswrapper[4949]: E0216 11:36:34.238382 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:36:34 crc kubenswrapper[4949]: I0216 11:36:34.944812 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-f25z6" Feb 16 11:36:35 crc kubenswrapper[4949]: I0216 11:36:35.045359 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-glr48\" (UniqueName: \"kubernetes.io/projected/8bd581a9-2646-4045-bfb1-a0a4e356936d-kube-api-access-glr48\") pod \"8bd581a9-2646-4045-bfb1-a0a4e356936d\" (UID: \"8bd581a9-2646-4045-bfb1-a0a4e356936d\") " Feb 16 11:36:35 crc kubenswrapper[4949]: I0216 11:36:35.045604 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/8bd581a9-2646-4045-bfb1-a0a4e356936d-ssh-key-openstack-edpm-ipam\") pod \"8bd581a9-2646-4045-bfb1-a0a4e356936d\" (UID: \"8bd581a9-2646-4045-bfb1-a0a4e356936d\") " Feb 16 11:36:35 crc kubenswrapper[4949]: I0216 11:36:35.045630 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8bd581a9-2646-4045-bfb1-a0a4e356936d-inventory\") pod \"8bd581a9-2646-4045-bfb1-a0a4e356936d\" (UID: \"8bd581a9-2646-4045-bfb1-a0a4e356936d\") " Feb 16 11:36:35 crc kubenswrapper[4949]: I0216 11:36:35.052722 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8bd581a9-2646-4045-bfb1-a0a4e356936d-kube-api-access-glr48" (OuterVolumeSpecName: "kube-api-access-glr48") pod "8bd581a9-2646-4045-bfb1-a0a4e356936d" (UID: "8bd581a9-2646-4045-bfb1-a0a4e356936d"). InnerVolumeSpecName "kube-api-access-glr48". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:36:35 crc kubenswrapper[4949]: I0216 11:36:35.079050 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8bd581a9-2646-4045-bfb1-a0a4e356936d-inventory" (OuterVolumeSpecName: "inventory") pod "8bd581a9-2646-4045-bfb1-a0a4e356936d" (UID: "8bd581a9-2646-4045-bfb1-a0a4e356936d"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:36:35 crc kubenswrapper[4949]: I0216 11:36:35.085072 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8bd581a9-2646-4045-bfb1-a0a4e356936d-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "8bd581a9-2646-4045-bfb1-a0a4e356936d" (UID: "8bd581a9-2646-4045-bfb1-a0a4e356936d"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:36:35 crc kubenswrapper[4949]: I0216 11:36:35.174489 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-glr48\" (UniqueName: \"kubernetes.io/projected/8bd581a9-2646-4045-bfb1-a0a4e356936d-kube-api-access-glr48\") on node \"crc\" DevicePath \"\"" Feb 16 11:36:35 crc kubenswrapper[4949]: I0216 11:36:35.174545 4949 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/8bd581a9-2646-4045-bfb1-a0a4e356936d-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Feb 16 11:36:35 crc kubenswrapper[4949]: I0216 11:36:35.174561 4949 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8bd581a9-2646-4045-bfb1-a0a4e356936d-inventory\") on node \"crc\" DevicePath \"\"" Feb 16 11:36:35 crc kubenswrapper[4949]: I0216 11:36:35.358256 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-lk6vt" Feb 16 11:36:35 crc kubenswrapper[4949]: I0216 11:36:35.358337 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-lk6vt" Feb 16 11:36:35 crc kubenswrapper[4949]: I0216 11:36:35.420658 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-lk6vt" Feb 16 11:36:35 crc kubenswrapper[4949]: I0216 11:36:35.434758 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-f25z6" Feb 16 11:36:35 crc kubenswrapper[4949]: I0216 11:36:35.434752 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-f25z6" event={"ID":"8bd581a9-2646-4045-bfb1-a0a4e356936d","Type":"ContainerDied","Data":"8a62ac2404bef79eaeeb1f0031046944f97621dbb5960ff29249ff6a9e07400f"} Feb 16 11:36:35 crc kubenswrapper[4949]: I0216 11:36:35.434927 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8a62ac2404bef79eaeeb1f0031046944f97621dbb5960ff29249ff6a9e07400f" Feb 16 11:36:35 crc kubenswrapper[4949]: I0216 11:36:35.511761 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-lk6vt" Feb 16 11:36:35 crc kubenswrapper[4949]: I0216 11:36:35.582690 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qdqw2"] Feb 16 11:36:35 crc kubenswrapper[4949]: E0216 11:36:35.583362 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8bd581a9-2646-4045-bfb1-a0a4e356936d" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Feb 16 11:36:35 crc kubenswrapper[4949]: I0216 11:36:35.583386 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="8bd581a9-2646-4045-bfb1-a0a4e356936d" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Feb 16 11:36:35 crc kubenswrapper[4949]: I0216 11:36:35.583722 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="8bd581a9-2646-4045-bfb1-a0a4e356936d" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Feb 16 11:36:35 crc kubenswrapper[4949]: I0216 11:36:35.585035 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qdqw2" Feb 16 11:36:35 crc kubenswrapper[4949]: I0216 11:36:35.588968 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Feb 16 11:36:35 crc kubenswrapper[4949]: I0216 11:36:35.589219 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-g89vn" Feb 16 11:36:35 crc kubenswrapper[4949]: I0216 11:36:35.589346 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Feb 16 11:36:35 crc kubenswrapper[4949]: I0216 11:36:35.589468 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Feb 16 11:36:35 crc kubenswrapper[4949]: I0216 11:36:35.601027 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qdqw2"] Feb 16 11:36:35 crc kubenswrapper[4949]: I0216 11:36:35.666086 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-lk6vt"] Feb 16 11:36:35 crc kubenswrapper[4949]: I0216 11:36:35.688777 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f26a3c47-2f6f-481f-b344-964ee178b1d8-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-qdqw2\" (UID: \"f26a3c47-2f6f-481f-b344-964ee178b1d8\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qdqw2" Feb 16 11:36:35 crc kubenswrapper[4949]: I0216 11:36:35.688905 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/f26a3c47-2f6f-481f-b344-964ee178b1d8-ssh-key-openstack-edpm-ipam\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-qdqw2\" (UID: \"f26a3c47-2f6f-481f-b344-964ee178b1d8\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qdqw2" Feb 16 11:36:35 crc kubenswrapper[4949]: I0216 11:36:35.689839 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f26a3c47-2f6f-481f-b344-964ee178b1d8-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-qdqw2\" (UID: \"f26a3c47-2f6f-481f-b344-964ee178b1d8\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qdqw2" Feb 16 11:36:35 crc kubenswrapper[4949]: I0216 11:36:35.689970 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9lffg\" (UniqueName: \"kubernetes.io/projected/f26a3c47-2f6f-481f-b344-964ee178b1d8-kube-api-access-9lffg\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-qdqw2\" (UID: \"f26a3c47-2f6f-481f-b344-964ee178b1d8\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qdqw2" Feb 16 11:36:35 crc kubenswrapper[4949]: I0216 11:36:35.792400 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f26a3c47-2f6f-481f-b344-964ee178b1d8-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-qdqw2\" (UID: \"f26a3c47-2f6f-481f-b344-964ee178b1d8\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qdqw2" Feb 16 11:36:35 crc kubenswrapper[4949]: I0216 
11:36:35.792449 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9lffg\" (UniqueName: \"kubernetes.io/projected/f26a3c47-2f6f-481f-b344-964ee178b1d8-kube-api-access-9lffg\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-qdqw2\" (UID: \"f26a3c47-2f6f-481f-b344-964ee178b1d8\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qdqw2" Feb 16 11:36:35 crc kubenswrapper[4949]: I0216 11:36:35.792607 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f26a3c47-2f6f-481f-b344-964ee178b1d8-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-qdqw2\" (UID: \"f26a3c47-2f6f-481f-b344-964ee178b1d8\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qdqw2" Feb 16 11:36:35 crc kubenswrapper[4949]: I0216 11:36:35.792677 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/f26a3c47-2f6f-481f-b344-964ee178b1d8-ssh-key-openstack-edpm-ipam\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-qdqw2\" (UID: \"f26a3c47-2f6f-481f-b344-964ee178b1d8\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qdqw2" Feb 16 11:36:35 crc kubenswrapper[4949]: I0216 11:36:35.798369 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/f26a3c47-2f6f-481f-b344-964ee178b1d8-ssh-key-openstack-edpm-ipam\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-qdqw2\" (UID: \"f26a3c47-2f6f-481f-b344-964ee178b1d8\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qdqw2" Feb 16 11:36:35 crc kubenswrapper[4949]: I0216 11:36:35.799905 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f26a3c47-2f6f-481f-b344-964ee178b1d8-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-qdqw2\" (UID: \"f26a3c47-2f6f-481f-b344-964ee178b1d8\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qdqw2" Feb 16 11:36:35 crc kubenswrapper[4949]: I0216 11:36:35.800283 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f26a3c47-2f6f-481f-b344-964ee178b1d8-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-qdqw2\" (UID: \"f26a3c47-2f6f-481f-b344-964ee178b1d8\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qdqw2" Feb 16 11:36:35 crc kubenswrapper[4949]: I0216 11:36:35.814847 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9lffg\" (UniqueName: \"kubernetes.io/projected/f26a3c47-2f6f-481f-b344-964ee178b1d8-kube-api-access-9lffg\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-qdqw2\" (UID: \"f26a3c47-2f6f-481f-b344-964ee178b1d8\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qdqw2" Feb 16 11:36:35 crc kubenswrapper[4949]: I0216 11:36:35.912831 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qdqw2" Feb 16 11:36:36 crc kubenswrapper[4949]: I0216 11:36:36.490720 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qdqw2"] Feb 16 11:36:37 crc kubenswrapper[4949]: I0216 11:36:37.458450 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qdqw2" event={"ID":"f26a3c47-2f6f-481f-b344-964ee178b1d8","Type":"ContainerStarted","Data":"3d9c7c8b798d53fddf4ef4320eec5eadaa44be3bd927eb440e76cd4eaf2a8d0c"} Feb 16 11:36:37 crc kubenswrapper[4949]: I0216 11:36:37.458870 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qdqw2" event={"ID":"f26a3c47-2f6f-481f-b344-964ee178b1d8","Type":"ContainerStarted","Data":"deeda77b65c5562dfe7c4181050dfc00e4917e1083de340a2578be41d9693828"} Feb 16 11:36:37 crc kubenswrapper[4949]: I0216 11:36:37.458645 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-lk6vt" podUID="deeafd19-11a8-449b-9ff0-c2f177ceb4b0" containerName="registry-server" containerID="cri-o://4ef786e8127932d5833a4db46810b6ad6608cfbb9960a8d4a98009f1382c9b96" gracePeriod=2 Feb 16 11:36:37 crc kubenswrapper[4949]: I0216 11:36:37.494909 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qdqw2" podStartSLOduration=2.069505432 podStartE2EDuration="2.49488748s" podCreationTimestamp="2026-02-16 11:36:35 +0000 UTC" firstStartedPulling="2026-02-16 11:36:36.493065212 +0000 UTC m=+1786.122399397" lastFinishedPulling="2026-02-16 11:36:36.91844728 +0000 UTC m=+1786.547781445" observedRunningTime="2026-02-16 11:36:37.478651305 +0000 UTC m=+1787.107985480" watchObservedRunningTime="2026-02-16 11:36:37.49488748 +0000 UTC m=+1787.124221645" Feb 16 11:36:38 crc kubenswrapper[4949]: I0216 11:36:38.042688 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lk6vt" Feb 16 11:36:38 crc kubenswrapper[4949]: I0216 11:36:38.154339 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qj92l\" (UniqueName: \"kubernetes.io/projected/deeafd19-11a8-449b-9ff0-c2f177ceb4b0-kube-api-access-qj92l\") pod \"deeafd19-11a8-449b-9ff0-c2f177ceb4b0\" (UID: \"deeafd19-11a8-449b-9ff0-c2f177ceb4b0\") " Feb 16 11:36:38 crc kubenswrapper[4949]: I0216 11:36:38.154457 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/deeafd19-11a8-449b-9ff0-c2f177ceb4b0-utilities\") pod \"deeafd19-11a8-449b-9ff0-c2f177ceb4b0\" (UID: \"deeafd19-11a8-449b-9ff0-c2f177ceb4b0\") " Feb 16 11:36:38 crc kubenswrapper[4949]: I0216 11:36:38.154533 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/deeafd19-11a8-449b-9ff0-c2f177ceb4b0-catalog-content\") pod \"deeafd19-11a8-449b-9ff0-c2f177ceb4b0\" (UID: \"deeafd19-11a8-449b-9ff0-c2f177ceb4b0\") " Feb 16 11:36:38 crc kubenswrapper[4949]: I0216 11:36:38.158960 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/deeafd19-11a8-449b-9ff0-c2f177ceb4b0-utilities" (OuterVolumeSpecName: "utilities") pod "deeafd19-11a8-449b-9ff0-c2f177ceb4b0" (UID: "deeafd19-11a8-449b-9ff0-c2f177ceb4b0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:36:38 crc kubenswrapper[4949]: I0216 11:36:38.166512 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/deeafd19-11a8-449b-9ff0-c2f177ceb4b0-kube-api-access-qj92l" (OuterVolumeSpecName: "kube-api-access-qj92l") pod "deeafd19-11a8-449b-9ff0-c2f177ceb4b0" (UID: "deeafd19-11a8-449b-9ff0-c2f177ceb4b0"). InnerVolumeSpecName "kube-api-access-qj92l". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:36:38 crc kubenswrapper[4949]: I0216 11:36:38.190669 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/deeafd19-11a8-449b-9ff0-c2f177ceb4b0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "deeafd19-11a8-449b-9ff0-c2f177ceb4b0" (UID: "deeafd19-11a8-449b-9ff0-c2f177ceb4b0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:36:38 crc kubenswrapper[4949]: I0216 11:36:38.258153 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qj92l\" (UniqueName: \"kubernetes.io/projected/deeafd19-11a8-449b-9ff0-c2f177ceb4b0-kube-api-access-qj92l\") on node \"crc\" DevicePath \"\"" Feb 16 11:36:38 crc kubenswrapper[4949]: I0216 11:36:38.258202 4949 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/deeafd19-11a8-449b-9ff0-c2f177ceb4b0-utilities\") on node \"crc\" DevicePath \"\"" Feb 16 11:36:38 crc kubenswrapper[4949]: I0216 11:36:38.258216 4949 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/deeafd19-11a8-449b-9ff0-c2f177ceb4b0-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 16 11:36:38 crc kubenswrapper[4949]: I0216 11:36:38.472759 4949 generic.go:334] "Generic (PLEG): container finished" podID="deeafd19-11a8-449b-9ff0-c2f177ceb4b0" containerID="4ef786e8127932d5833a4db46810b6ad6608cfbb9960a8d4a98009f1382c9b96" exitCode=0 Feb 16 11:36:38 crc kubenswrapper[4949]: I0216 11:36:38.472833 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lk6vt" Feb 16 11:36:38 crc kubenswrapper[4949]: I0216 11:36:38.472842 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lk6vt" event={"ID":"deeafd19-11a8-449b-9ff0-c2f177ceb4b0","Type":"ContainerDied","Data":"4ef786e8127932d5833a4db46810b6ad6608cfbb9960a8d4a98009f1382c9b96"} Feb 16 11:36:38 crc kubenswrapper[4949]: I0216 11:36:38.473232 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lk6vt" event={"ID":"deeafd19-11a8-449b-9ff0-c2f177ceb4b0","Type":"ContainerDied","Data":"3b1801480956b9f6e8f2cc636f034ed5383eab2f36b3df82af3c8d5fa8ae2464"} Feb 16 11:36:38 crc kubenswrapper[4949]: I0216 11:36:38.473261 4949 scope.go:117] "RemoveContainer" containerID="4ef786e8127932d5833a4db46810b6ad6608cfbb9960a8d4a98009f1382c9b96" Feb 16 11:36:38 crc kubenswrapper[4949]: I0216 11:36:38.518843 4949 scope.go:117] "RemoveContainer" containerID="4db7f47e845dced6fe095a970bd20bb5fd2301fde01d774dba1bdc89a4e7c815" Feb 16 11:36:38 crc kubenswrapper[4949]: I0216 11:36:38.545300 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-lk6vt"] Feb 16 11:36:38 crc kubenswrapper[4949]: I0216 11:36:38.558502 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-lk6vt"] Feb 16 11:36:38 crc kubenswrapper[4949]: I0216 11:36:38.567860 4949 scope.go:117] "RemoveContainer" containerID="d2ea3bc92f6fab80ae6f2c6524bcdafa88f2cb8934887ea4e61543cd38986342" Feb 16 11:36:38 crc kubenswrapper[4949]: I0216 11:36:38.608373 4949 scope.go:117] "RemoveContainer" containerID="4ef786e8127932d5833a4db46810b6ad6608cfbb9960a8d4a98009f1382c9b96" Feb 16 11:36:38 crc kubenswrapper[4949]: E0216 11:36:38.608954 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4ef786e8127932d5833a4db46810b6ad6608cfbb9960a8d4a98009f1382c9b96\": container with ID starting with 4ef786e8127932d5833a4db46810b6ad6608cfbb9960a8d4a98009f1382c9b96 not found: ID does not exist" containerID="4ef786e8127932d5833a4db46810b6ad6608cfbb9960a8d4a98009f1382c9b96" Feb 16 11:36:38 crc kubenswrapper[4949]: I0216 11:36:38.609005 4949 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ef786e8127932d5833a4db46810b6ad6608cfbb9960a8d4a98009f1382c9b96"} err="failed to get container status \"4ef786e8127932d5833a4db46810b6ad6608cfbb9960a8d4a98009f1382c9b96\": rpc error: code = NotFound desc = could not find container \"4ef786e8127932d5833a4db46810b6ad6608cfbb9960a8d4a98009f1382c9b96\": container with ID starting with 4ef786e8127932d5833a4db46810b6ad6608cfbb9960a8d4a98009f1382c9b96 not found: ID does not exist" Feb 16 11:36:38 crc kubenswrapper[4949]: I0216 11:36:38.609036 4949 scope.go:117] "RemoveContainer" containerID="4db7f47e845dced6fe095a970bd20bb5fd2301fde01d774dba1bdc89a4e7c815" Feb 16 11:36:38 crc kubenswrapper[4949]: E0216 11:36:38.609612 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4db7f47e845dced6fe095a970bd20bb5fd2301fde01d774dba1bdc89a4e7c815\": container with ID starting with 4db7f47e845dced6fe095a970bd20bb5fd2301fde01d774dba1bdc89a4e7c815 not found: ID does not exist" containerID="4db7f47e845dced6fe095a970bd20bb5fd2301fde01d774dba1bdc89a4e7c815" Feb 16 11:36:38 crc kubenswrapper[4949]: I0216 11:36:38.609641 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4db7f47e845dced6fe095a970bd20bb5fd2301fde01d774dba1bdc89a4e7c815"} err="failed to get container status \"4db7f47e845dced6fe095a970bd20bb5fd2301fde01d774dba1bdc89a4e7c815\": rpc error: code = NotFound desc = could not find container \"4db7f47e845dced6fe095a970bd20bb5fd2301fde01d774dba1bdc89a4e7c815\": container with ID starting with 4db7f47e845dced6fe095a970bd20bb5fd2301fde01d774dba1bdc89a4e7c815 not found: ID does not exist" Feb 16 11:36:38 crc kubenswrapper[4949]: I0216 11:36:38.609661 4949 scope.go:117] "RemoveContainer" containerID="d2ea3bc92f6fab80ae6f2c6524bcdafa88f2cb8934887ea4e61543cd38986342" Feb 16 11:36:38 crc kubenswrapper[4949]: E0216 11:36:38.609968 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d2ea3bc92f6fab80ae6f2c6524bcdafa88f2cb8934887ea4e61543cd38986342\": container with ID starting with d2ea3bc92f6fab80ae6f2c6524bcdafa88f2cb8934887ea4e61543cd38986342 not found: ID does not exist" containerID="d2ea3bc92f6fab80ae6f2c6524bcdafa88f2cb8934887ea4e61543cd38986342" Feb 16 11:36:38 crc kubenswrapper[4949]: I0216 11:36:38.609997 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d2ea3bc92f6fab80ae6f2c6524bcdafa88f2cb8934887ea4e61543cd38986342"} err="failed to get container status \"d2ea3bc92f6fab80ae6f2c6524bcdafa88f2cb8934887ea4e61543cd38986342\": rpc error: code = NotFound desc = could not find container \"d2ea3bc92f6fab80ae6f2c6524bcdafa88f2cb8934887ea4e61543cd38986342\": container with ID starting with d2ea3bc92f6fab80ae6f2c6524bcdafa88f2cb8934887ea4e61543cd38986342 not found: ID does not exist" Feb 16 11:36:39 crc kubenswrapper[4949]: I0216 11:36:39.251588 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="deeafd19-11a8-449b-9ff0-c2f177ceb4b0" path="/var/lib/kubelet/pods/deeafd19-11a8-449b-9ff0-c2f177ceb4b0/volumes" Feb 16 11:36:43 crc kubenswrapper[4949]: E0216 11:36:43.358694 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested 
in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 16 11:36:43 crc kubenswrapper[4949]: E0216 11:36:43.359370 4949 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 16 11:36:43 crc kubenswrapper[4949]: E0216 11:36:43.359699 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n674h5dh7bh65bhcch65chc4h547h5d4h5c7h5dch5c8h74hb9h5f4hd8h79h7h59bh559h56bh9bhbch67bh68bh575h5cbh658h5bch7bhcch5d9q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8k7p7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(c69d7379-6f2b-45ae-8972-71e223a337a8): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in 
quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 16 11:36:43 crc kubenswrapper[4949]: E0216 11:36:43.361302 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:36:46 crc kubenswrapper[4949]: E0216 11:36:46.366403 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 16 11:36:46 crc kubenswrapper[4949]: E0216 11:36:46.366884 4949 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 16 11:36:46 crc kubenswrapper[4949]: E0216 11:36:46.367041 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d 
db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ksbml,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-5lgds_openstack(a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 16 11:36:46 crc kubenswrapper[4949]: E0216 11:36:46.368290 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:36:48 crc kubenswrapper[4949]: I0216 11:36:48.236803 4949 scope.go:117] "RemoveContainer" containerID="fcc25fdabb7245fb2b21fe157daaa0479c92c3c1e231adb091c93ee7fe6b8437" Feb 16 11:36:48 crc kubenswrapper[4949]: E0216 11:36:48.237553 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:36:55 crc kubenswrapper[4949]: I0216 11:36:55.106486 4949 scope.go:117] "RemoveContainer" containerID="0d8e0dfc7ed2a7a584158a61c7a84d8168b4e7cce323bf8d92f2ace421633ffc" Feb 16 11:36:55 crc kubenswrapper[4949]: I0216 11:36:55.145829 4949 scope.go:117] "RemoveContainer" containerID="d67186fb11e74fd25242f5bd451feeedee251d7cce9bf89a9a7455d2d03bd3c5" Feb 16 11:36:57 crc kubenswrapper[4949]: E0216 11:36:57.238125 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:37:00 crc kubenswrapper[4949]: I0216 11:37:00.235879 4949 scope.go:117] "RemoveContainer" containerID="fcc25fdabb7245fb2b21fe157daaa0479c92c3c1e231adb091c93ee7fe6b8437" Feb 16 11:37:00 crc kubenswrapper[4949]: E0216 11:37:00.236869 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:37:01 crc kubenswrapper[4949]: E0216 11:37:01.253641 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:37:05 crc kubenswrapper[4949]: I0216 11:37:05.807232 4949 generic.go:334] "Generic (PLEG): container finished" podID="e0597426-aa70-4dc2-a6dc-c6c2aeea1f27" containerID="48286ed4fedb8e8dd3e6be0f1421bbd29679df31a23ab5a95a56cd26d538b412" exitCode=0 Feb 16 11:37:05 crc kubenswrapper[4949]: I0216 11:37:05.807321 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"e0597426-aa70-4dc2-a6dc-c6c2aeea1f27","Type":"ContainerDied","Data":"48286ed4fedb8e8dd3e6be0f1421bbd29679df31a23ab5a95a56cd26d538b412"} Feb 16 11:37:06 crc kubenswrapper[4949]: I0216 11:37:06.844216 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"e0597426-aa70-4dc2-a6dc-c6c2aeea1f27","Type":"ContainerStarted","Data":"150a10c53cfb5d089fb4994dc777291e3277fd5030ec3341fe9b0db81e2b725a"} Feb 16 11:37:06 crc kubenswrapper[4949]: I0216 11:37:06.844783 4949 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-1" Feb 16 11:37:06 crc kubenswrapper[4949]: I0216 11:37:06.907745 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-1" podStartSLOduration=36.907722941 podStartE2EDuration="36.907722941s" podCreationTimestamp="2026-02-16 11:36:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:37:06.880035087 +0000 UTC m=+1816.509369252" watchObservedRunningTime="2026-02-16 11:37:06.907722941 +0000 UTC m=+1816.537057106" Feb 16 11:37:11 crc kubenswrapper[4949]: E0216 11:37:11.248250 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:37:12 crc kubenswrapper[4949]: I0216 11:37:12.235921 4949 scope.go:117] "RemoveContainer" containerID="fcc25fdabb7245fb2b21fe157daaa0479c92c3c1e231adb091c93ee7fe6b8437" Feb 16 11:37:12 crc kubenswrapper[4949]: E0216 11:37:12.236251 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:37:16 crc kubenswrapper[4949]: E0216 11:37:16.237599 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:37:20 crc kubenswrapper[4949]: I0216 11:37:20.532351 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-1" Feb 16 11:37:20 crc kubenswrapper[4949]: I0216 11:37:20.609836 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Feb 16 11:37:22 crc kubenswrapper[4949]: E0216 11:37:22.237698 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:37:24 crc kubenswrapper[4949]: I0216 11:37:24.903212 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="2b4e8478-eec0-499f-a824-b0f07355e4f6" containerName="rabbitmq" containerID="cri-o://0aa10bfe0b645929c6b02ae797deef640d296001d6e7f8b326d64d53facdf814" gracePeriod=604796 Feb 16 11:37:26 crc kubenswrapper[4949]: I0216 11:37:26.236221 4949 scope.go:117] "RemoveContainer" containerID="fcc25fdabb7245fb2b21fe157daaa0479c92c3c1e231adb091c93ee7fe6b8437" Feb 16 11:37:26 crc kubenswrapper[4949]: E0216 11:37:26.237020 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:37:29 crc kubenswrapper[4949]: E0216 11:37:29.238959 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:37:30 crc kubenswrapper[4949]: I0216 11:37:30.873143 4949 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="2b4e8478-eec0-499f-a824-b0f07355e4f6" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.128:5671: connect: connection refused" Feb 16 11:37:31 crc kubenswrapper[4949]: I0216 11:37:31.183126 4949 generic.go:334] "Generic (PLEG): container finished" podID="2b4e8478-eec0-499f-a824-b0f07355e4f6" containerID="0aa10bfe0b645929c6b02ae797deef640d296001d6e7f8b326d64d53facdf814" exitCode=0 Feb 16 11:37:31 crc kubenswrapper[4949]: I0216 11:37:31.183210 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"2b4e8478-eec0-499f-a824-b0f07355e4f6","Type":"ContainerDied","Data":"0aa10bfe0b645929c6b02ae797deef640d296001d6e7f8b326d64d53facdf814"} Feb 16 11:37:31 crc kubenswrapper[4949]: I0216 11:37:31.593471 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Feb 16 11:37:31 crc kubenswrapper[4949]: I0216 11:37:31.780923 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/2b4e8478-eec0-499f-a824-b0f07355e4f6-erlang-cookie-secret\") pod \"2b4e8478-eec0-499f-a824-b0f07355e4f6\" (UID: \"2b4e8478-eec0-499f-a824-b0f07355e4f6\") " Feb 16 11:37:31 crc kubenswrapper[4949]: I0216 11:37:31.780977 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2b4e8478-eec0-499f-a824-b0f07355e4f6-config-data\") pod \"2b4e8478-eec0-499f-a824-b0f07355e4f6\" (UID: \"2b4e8478-eec0-499f-a824-b0f07355e4f6\") " Feb 16 11:37:31 crc kubenswrapper[4949]: I0216 11:37:31.782049 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/2b4e8478-eec0-499f-a824-b0f07355e4f6-rabbitmq-confd\") pod \"2b4e8478-eec0-499f-a824-b0f07355e4f6\" (UID: \"2b4e8478-eec0-499f-a824-b0f07355e4f6\") " Feb 16 11:37:31 crc kubenswrapper[4949]: I0216 11:37:31.782125 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/2b4e8478-eec0-499f-a824-b0f07355e4f6-server-conf\") pod \"2b4e8478-eec0-499f-a824-b0f07355e4f6\" (UID: \"2b4e8478-eec0-499f-a824-b0f07355e4f6\") " Feb 16 11:37:31 crc kubenswrapper[4949]: I0216 11:37:31.782162 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/2b4e8478-eec0-499f-a824-b0f07355e4f6-pod-info\") pod \"2b4e8478-eec0-499f-a824-b0f07355e4f6\" (UID: \"2b4e8478-eec0-499f-a824-b0f07355e4f6\") " Feb 16 11:37:31 
Feb 16 11:37:31 crc kubenswrapper[4949]: I0216 11:37:31.780923 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/2b4e8478-eec0-499f-a824-b0f07355e4f6-erlang-cookie-secret\") pod \"2b4e8478-eec0-499f-a824-b0f07355e4f6\" (UID: \"2b4e8478-eec0-499f-a824-b0f07355e4f6\") "
Feb 16 11:37:31 crc kubenswrapper[4949]: I0216 11:37:31.780977 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2b4e8478-eec0-499f-a824-b0f07355e4f6-config-data\") pod \"2b4e8478-eec0-499f-a824-b0f07355e4f6\" (UID: \"2b4e8478-eec0-499f-a824-b0f07355e4f6\") "
Feb 16 11:37:31 crc kubenswrapper[4949]: I0216 11:37:31.782049 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/2b4e8478-eec0-499f-a824-b0f07355e4f6-rabbitmq-confd\") pod \"2b4e8478-eec0-499f-a824-b0f07355e4f6\" (UID: \"2b4e8478-eec0-499f-a824-b0f07355e4f6\") "
Feb 16 11:37:31 crc kubenswrapper[4949]: I0216 11:37:31.782125 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/2b4e8478-eec0-499f-a824-b0f07355e4f6-server-conf\") pod \"2b4e8478-eec0-499f-a824-b0f07355e4f6\" (UID: \"2b4e8478-eec0-499f-a824-b0f07355e4f6\") "
Feb 16 11:37:31 crc kubenswrapper[4949]: I0216 11:37:31.782162 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/2b4e8478-eec0-499f-a824-b0f07355e4f6-pod-info\") pod \"2b4e8478-eec0-499f-a824-b0f07355e4f6\" (UID: \"2b4e8478-eec0-499f-a824-b0f07355e4f6\") "
Feb 16 11:37:31 crc kubenswrapper[4949]: I0216 11:37:31.782279 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/2b4e8478-eec0-499f-a824-b0f07355e4f6-plugins-conf\") pod \"2b4e8478-eec0-499f-a824-b0f07355e4f6\" (UID: \"2b4e8478-eec0-499f-a824-b0f07355e4f6\") "
Feb 16 11:37:31 crc kubenswrapper[4949]: I0216 11:37:31.782945 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-488ac636-798e-4e54-8b90-72d85ab2610f\") pod \"2b4e8478-eec0-499f-a824-b0f07355e4f6\" (UID: \"2b4e8478-eec0-499f-a824-b0f07355e4f6\") "
Feb 16 11:37:31 crc kubenswrapper[4949]: I0216 11:37:31.783061 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pxqff\" (UniqueName: \"kubernetes.io/projected/2b4e8478-eec0-499f-a824-b0f07355e4f6-kube-api-access-pxqff\") pod \"2b4e8478-eec0-499f-a824-b0f07355e4f6\" (UID: \"2b4e8478-eec0-499f-a824-b0f07355e4f6\") "
Feb 16 11:37:31 crc kubenswrapper[4949]: I0216 11:37:31.783097 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/2b4e8478-eec0-499f-a824-b0f07355e4f6-rabbitmq-plugins\") pod \"2b4e8478-eec0-499f-a824-b0f07355e4f6\" (UID: \"2b4e8478-eec0-499f-a824-b0f07355e4f6\") "
Feb 16 11:37:31 crc kubenswrapper[4949]: I0216 11:37:31.783120 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/2b4e8478-eec0-499f-a824-b0f07355e4f6-rabbitmq-tls\") pod \"2b4e8478-eec0-499f-a824-b0f07355e4f6\" (UID: \"2b4e8478-eec0-499f-a824-b0f07355e4f6\") "
Feb 16 11:37:31 crc kubenswrapper[4949]: I0216 11:37:31.783208 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/2b4e8478-eec0-499f-a824-b0f07355e4f6-rabbitmq-erlang-cookie\") pod \"2b4e8478-eec0-499f-a824-b0f07355e4f6\" (UID: \"2b4e8478-eec0-499f-a824-b0f07355e4f6\") "
Feb 16 11:37:31 crc kubenswrapper[4949]: I0216 11:37:31.784414 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b4e8478-eec0-499f-a824-b0f07355e4f6-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "2b4e8478-eec0-499f-a824-b0f07355e4f6" (UID: "2b4e8478-eec0-499f-a824-b0f07355e4f6"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Feb 16 11:37:31 crc kubenswrapper[4949]: I0216 11:37:31.788778 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2b4e8478-eec0-499f-a824-b0f07355e4f6-kube-api-access-pxqff" (OuterVolumeSpecName: "kube-api-access-pxqff") pod "2b4e8478-eec0-499f-a824-b0f07355e4f6" (UID: "2b4e8478-eec0-499f-a824-b0f07355e4f6"). InnerVolumeSpecName "kube-api-access-pxqff". PluginName "kubernetes.io/projected", VolumeGidValue ""
Feb 16 11:37:31 crc kubenswrapper[4949]: I0216 11:37:31.790975 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2b4e8478-eec0-499f-a824-b0f07355e4f6-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "2b4e8478-eec0-499f-a824-b0f07355e4f6" (UID: "2b4e8478-eec0-499f-a824-b0f07355e4f6"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Feb 16 11:37:31 crc kubenswrapper[4949]: I0216 11:37:31.791365 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b4e8478-eec0-499f-a824-b0f07355e4f6-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "2b4e8478-eec0-499f-a824-b0f07355e4f6" (UID: "2b4e8478-eec0-499f-a824-b0f07355e4f6"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Feb 16 11:37:31 crc kubenswrapper[4949]: I0216 11:37:31.796451 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2b4e8478-eec0-499f-a824-b0f07355e4f6-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "2b4e8478-eec0-499f-a824-b0f07355e4f6" (UID: "2b4e8478-eec0-499f-a824-b0f07355e4f6"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Feb 16 11:37:31 crc kubenswrapper[4949]: I0216 11:37:31.802428 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/2b4e8478-eec0-499f-a824-b0f07355e4f6-pod-info" (OuterVolumeSpecName: "pod-info") pod "2b4e8478-eec0-499f-a824-b0f07355e4f6" (UID: "2b4e8478-eec0-499f-a824-b0f07355e4f6"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue ""
Feb 16 11:37:31 crc kubenswrapper[4949]: I0216 11:37:31.802548 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2b4e8478-eec0-499f-a824-b0f07355e4f6-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "2b4e8478-eec0-499f-a824-b0f07355e4f6" (UID: "2b4e8478-eec0-499f-a824-b0f07355e4f6"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Feb 16 11:37:31 crc kubenswrapper[4949]: I0216 11:37:31.841378 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-488ac636-798e-4e54-8b90-72d85ab2610f" (OuterVolumeSpecName: "persistence") pod "2b4e8478-eec0-499f-a824-b0f07355e4f6" (UID: "2b4e8478-eec0-499f-a824-b0f07355e4f6"). InnerVolumeSpecName "pvc-488ac636-798e-4e54-8b90-72d85ab2610f". PluginName "kubernetes.io/csi", VolumeGidValue ""
Feb 16 11:37:31 crc kubenswrapper[4949]: I0216 11:37:31.851208 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b4e8478-eec0-499f-a824-b0f07355e4f6-config-data" (OuterVolumeSpecName: "config-data") pod "2b4e8478-eec0-499f-a824-b0f07355e4f6" (UID: "2b4e8478-eec0-499f-a824-b0f07355e4f6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Feb 16 11:37:31 crc kubenswrapper[4949]: I0216 11:37:31.886732 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b4e8478-eec0-499f-a824-b0f07355e4f6-server-conf" (OuterVolumeSpecName: "server-conf") pod "2b4e8478-eec0-499f-a824-b0f07355e4f6" (UID: "2b4e8478-eec0-499f-a824-b0f07355e4f6"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Feb 16 11:37:31 crc kubenswrapper[4949]: I0216 11:37:31.888121 4949 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/2b4e8478-eec0-499f-a824-b0f07355e4f6-server-conf\") on node \"crc\" DevicePath \"\""
Feb 16 11:37:31 crc kubenswrapper[4949]: I0216 11:37:31.888454 4949 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/2b4e8478-eec0-499f-a824-b0f07355e4f6-pod-info\") on node \"crc\" DevicePath \"\""
Feb 16 11:37:31 crc kubenswrapper[4949]: I0216 11:37:31.888480 4949 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/2b4e8478-eec0-499f-a824-b0f07355e4f6-plugins-conf\") on node \"crc\" DevicePath \"\""
Feb 16 11:37:31 crc kubenswrapper[4949]: I0216 11:37:31.888511 4949 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-488ac636-798e-4e54-8b90-72d85ab2610f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-488ac636-798e-4e54-8b90-72d85ab2610f\") on node \"crc\" "
Feb 16 11:37:31 crc kubenswrapper[4949]: I0216 11:37:31.888525 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pxqff\" (UniqueName: \"kubernetes.io/projected/2b4e8478-eec0-499f-a824-b0f07355e4f6-kube-api-access-pxqff\") on node \"crc\" DevicePath \"\""
Feb 16 11:37:31 crc kubenswrapper[4949]: I0216 11:37:31.888535 4949 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/2b4e8478-eec0-499f-a824-b0f07355e4f6-rabbitmq-plugins\") on node \"crc\" DevicePath \"\""
Feb 16 11:37:31 crc kubenswrapper[4949]: I0216 11:37:31.888543 4949 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/2b4e8478-eec0-499f-a824-b0f07355e4f6-rabbitmq-tls\") on node \"crc\" DevicePath \"\""
Feb 16 11:37:31 crc kubenswrapper[4949]: I0216 11:37:31.888551 4949 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/2b4e8478-eec0-499f-a824-b0f07355e4f6-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\""
Feb 16 11:37:31 crc kubenswrapper[4949]: I0216 11:37:31.888559 4949 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/2b4e8478-eec0-499f-a824-b0f07355e4f6-erlang-cookie-secret\") on node \"crc\" DevicePath \"\""
Feb 16 11:37:31 crc kubenswrapper[4949]: I0216 11:37:31.888567 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2b4e8478-eec0-499f-a824-b0f07355e4f6-config-data\") on node \"crc\" DevicePath \"\""
Feb 16 11:37:31 crc kubenswrapper[4949]: I0216 11:37:31.926479 4949 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice...
Feb 16 11:37:31 crc kubenswrapper[4949]: I0216 11:37:31.926631 4949 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-488ac636-798e-4e54-8b90-72d85ab2610f" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-488ac636-798e-4e54-8b90-72d85ab2610f") on node "crc"
Feb 16 11:37:31 crc kubenswrapper[4949]: I0216 11:37:31.966512 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2b4e8478-eec0-499f-a824-b0f07355e4f6-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "2b4e8478-eec0-499f-a824-b0f07355e4f6" (UID: "2b4e8478-eec0-499f-a824-b0f07355e4f6"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Feb 16 11:37:31 crc kubenswrapper[4949]: I0216 11:37:31.991133 4949 reconciler_common.go:293] "Volume detached for volume \"pvc-488ac636-798e-4e54-8b90-72d85ab2610f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-488ac636-798e-4e54-8b90-72d85ab2610f\") on node \"crc\" DevicePath \"\""
Feb 16 11:37:31 crc kubenswrapper[4949]: I0216 11:37:31.991196 4949 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/2b4e8478-eec0-499f-a824-b0f07355e4f6-rabbitmq-confd\") on node \"crc\" DevicePath \"\""
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.204099 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"2b4e8478-eec0-499f-a824-b0f07355e4f6","Type":"ContainerDied","Data":"e5b8549583bda9f42a5052692f24aa6850f0788c2b274beb42bd5b481e323e2b"}
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.204264 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.204606 4949 scope.go:117] "RemoveContainer" containerID="0aa10bfe0b645929c6b02ae797deef640d296001d6e7f8b326d64d53facdf814"
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.239895 4949 scope.go:117] "RemoveContainer" containerID="184ebec4345da93f1c2a65db2c7a0090e4039d1d6b0ebf753debbf33a95841da"
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.250145 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"]
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.262575 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"]
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.294289 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"]
Feb 16 11:37:32 crc kubenswrapper[4949]: E0216 11:37:32.294993 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b4e8478-eec0-499f-a824-b0f07355e4f6" containerName="setup-container"
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.295012 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b4e8478-eec0-499f-a824-b0f07355e4f6" containerName="setup-container"
Feb 16 11:37:32 crc kubenswrapper[4949]: E0216 11:37:32.295051 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="deeafd19-11a8-449b-9ff0-c2f177ceb4b0" containerName="extract-utilities"
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.295057 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="deeafd19-11a8-449b-9ff0-c2f177ceb4b0" containerName="extract-utilities"
Feb 16 11:37:32 crc kubenswrapper[4949]: E0216 11:37:32.295073 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="deeafd19-11a8-449b-9ff0-c2f177ceb4b0" containerName="extract-content"
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.295081 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="deeafd19-11a8-449b-9ff0-c2f177ceb4b0" containerName="extract-content"
Feb 16 11:37:32 crc kubenswrapper[4949]: E0216 11:37:32.295102 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b4e8478-eec0-499f-a824-b0f07355e4f6" containerName="rabbitmq"
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.295110 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b4e8478-eec0-499f-a824-b0f07355e4f6" containerName="rabbitmq"
Feb 16 11:37:32 crc kubenswrapper[4949]: E0216 11:37:32.295133 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="deeafd19-11a8-449b-9ff0-c2f177ceb4b0" containerName="registry-server"
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.295139 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="deeafd19-11a8-449b-9ff0-c2f177ceb4b0" containerName="registry-server"
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.296100 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="deeafd19-11a8-449b-9ff0-c2f177ceb4b0" containerName="registry-server"
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.296132 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="2b4e8478-eec0-499f-a824-b0f07355e4f6" containerName="rabbitmq"
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.297450 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.311699 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.405683 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4664279d-8b37-426a-a677-a3e982fb6beb-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"4664279d-8b37-426a-a677-a3e982fb6beb\") " pod="openstack/rabbitmq-server-0"
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.405753 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4664279d-8b37-426a-a677-a3e982fb6beb-pod-info\") pod \"rabbitmq-server-0\" (UID: \"4664279d-8b37-426a-a677-a3e982fb6beb\") " pod="openstack/rabbitmq-server-0"
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.405900 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4664279d-8b37-426a-a677-a3e982fb6beb-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"4664279d-8b37-426a-a677-a3e982fb6beb\") " pod="openstack/rabbitmq-server-0"
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.405943 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4664279d-8b37-426a-a677-a3e982fb6beb-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"4664279d-8b37-426a-a677-a3e982fb6beb\") " pod="openstack/rabbitmq-server-0"
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.405973 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-488ac636-798e-4e54-8b90-72d85ab2610f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-488ac636-798e-4e54-8b90-72d85ab2610f\") pod \"rabbitmq-server-0\" (UID: \"4664279d-8b37-426a-a677-a3e982fb6beb\") " pod="openstack/rabbitmq-server-0"
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.406037 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4664279d-8b37-426a-a677-a3e982fb6beb-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"4664279d-8b37-426a-a677-a3e982fb6beb\") " pod="openstack/rabbitmq-server-0"
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.406075 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4664279d-8b37-426a-a677-a3e982fb6beb-server-conf\") pod \"rabbitmq-server-0\" (UID: \"4664279d-8b37-426a-a677-a3e982fb6beb\") " pod="openstack/rabbitmq-server-0"
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.406102 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4664279d-8b37-426a-a677-a3e982fb6beb-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"4664279d-8b37-426a-a677-a3e982fb6beb\") " pod="openstack/rabbitmq-server-0"
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.406122 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qbmz7\" (UniqueName: \"kubernetes.io/projected/4664279d-8b37-426a-a677-a3e982fb6beb-kube-api-access-qbmz7\") pod \"rabbitmq-server-0\" (UID: \"4664279d-8b37-426a-a677-a3e982fb6beb\") " pod="openstack/rabbitmq-server-0"
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.406159 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4664279d-8b37-426a-a677-a3e982fb6beb-config-data\") pod \"rabbitmq-server-0\" (UID: \"4664279d-8b37-426a-a677-a3e982fb6beb\") " pod="openstack/rabbitmq-server-0"
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.406215 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4664279d-8b37-426a-a677-a3e982fb6beb-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"4664279d-8b37-426a-a677-a3e982fb6beb\") " pod="openstack/rabbitmq-server-0"
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.508857 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4664279d-8b37-426a-a677-a3e982fb6beb-server-conf\") pod \"rabbitmq-server-0\" (UID: \"4664279d-8b37-426a-a677-a3e982fb6beb\") " pod="openstack/rabbitmq-server-0"
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.509148 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4664279d-8b37-426a-a677-a3e982fb6beb-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"4664279d-8b37-426a-a677-a3e982fb6beb\") " pod="openstack/rabbitmq-server-0"
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.509193 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qbmz7\" (UniqueName: \"kubernetes.io/projected/4664279d-8b37-426a-a677-a3e982fb6beb-kube-api-access-qbmz7\") pod \"rabbitmq-server-0\" (UID: \"4664279d-8b37-426a-a677-a3e982fb6beb\") " pod="openstack/rabbitmq-server-0"
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.509220 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4664279d-8b37-426a-a677-a3e982fb6beb-config-data\") pod \"rabbitmq-server-0\" (UID: \"4664279d-8b37-426a-a677-a3e982fb6beb\") " pod="openstack/rabbitmq-server-0"
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.509248 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4664279d-8b37-426a-a677-a3e982fb6beb-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"4664279d-8b37-426a-a677-a3e982fb6beb\") " pod="openstack/rabbitmq-server-0"
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.510127 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4664279d-8b37-426a-a677-a3e982fb6beb-config-data\") pod \"rabbitmq-server-0\" (UID: \"4664279d-8b37-426a-a677-a3e982fb6beb\") " pod="openstack/rabbitmq-server-0"
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.510385 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4664279d-8b37-426a-a677-a3e982fb6beb-server-conf\") pod \"rabbitmq-server-0\" (UID: \"4664279d-8b37-426a-a677-a3e982fb6beb\") " pod="openstack/rabbitmq-server-0"
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.510411 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4664279d-8b37-426a-a677-a3e982fb6beb-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"4664279d-8b37-426a-a677-a3e982fb6beb\") " pod="openstack/rabbitmq-server-0"
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.510719 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4664279d-8b37-426a-a677-a3e982fb6beb-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"4664279d-8b37-426a-a677-a3e982fb6beb\") " pod="openstack/rabbitmq-server-0"
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.510767 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4664279d-8b37-426a-a677-a3e982fb6beb-pod-info\") pod \"rabbitmq-server-0\" (UID: \"4664279d-8b37-426a-a677-a3e982fb6beb\") " pod="openstack/rabbitmq-server-0"
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.510970 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4664279d-8b37-426a-a677-a3e982fb6beb-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"4664279d-8b37-426a-a677-a3e982fb6beb\") " pod="openstack/rabbitmq-server-0"
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.511007 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4664279d-8b37-426a-a677-a3e982fb6beb-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"4664279d-8b37-426a-a677-a3e982fb6beb\") " pod="openstack/rabbitmq-server-0"
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.511060 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-488ac636-798e-4e54-8b90-72d85ab2610f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-488ac636-798e-4e54-8b90-72d85ab2610f\") pod \"rabbitmq-server-0\" (UID: \"4664279d-8b37-426a-a677-a3e982fb6beb\") " pod="openstack/rabbitmq-server-0"
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.511165 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4664279d-8b37-426a-a677-a3e982fb6beb-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"4664279d-8b37-426a-a677-a3e982fb6beb\") " pod="openstack/rabbitmq-server-0"
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.511671 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4664279d-8b37-426a-a677-a3e982fb6beb-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"4664279d-8b37-426a-a677-a3e982fb6beb\") " pod="openstack/rabbitmq-server-0"
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.512098 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4664279d-8b37-426a-a677-a3e982fb6beb-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"4664279d-8b37-426a-a677-a3e982fb6beb\") " pod="openstack/rabbitmq-server-0"
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.520925 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4664279d-8b37-426a-a677-a3e982fb6beb-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"4664279d-8b37-426a-a677-a3e982fb6beb\") " pod="openstack/rabbitmq-server-0"
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.527827 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4664279d-8b37-426a-a677-a3e982fb6beb-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"4664279d-8b37-426a-a677-a3e982fb6beb\") " pod="openstack/rabbitmq-server-0"
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.544135 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4664279d-8b37-426a-a677-a3e982fb6beb-pod-info\") pod \"rabbitmq-server-0\" (UID: \"4664279d-8b37-426a-a677-a3e982fb6beb\") " pod="openstack/rabbitmq-server-0"
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.545067 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qbmz7\" (UniqueName: \"kubernetes.io/projected/4664279d-8b37-426a-a677-a3e982fb6beb-kube-api-access-qbmz7\") pod \"rabbitmq-server-0\" (UID: \"4664279d-8b37-426a-a677-a3e982fb6beb\") " pod="openstack/rabbitmq-server-0"
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.546025 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4664279d-8b37-426a-a677-a3e982fb6beb-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"4664279d-8b37-426a-a677-a3e982fb6beb\") " pod="openstack/rabbitmq-server-0"
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.561827 4949 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.561873 4949 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-488ac636-798e-4e54-8b90-72d85ab2610f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-488ac636-798e-4e54-8b90-72d85ab2610f\") pod \"rabbitmq-server-0\" (UID: \"4664279d-8b37-426a-a677-a3e982fb6beb\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1ff64a0fd9ac55a5bb2ee7ea71b4291dabca12fad6bff89052d3968762dcebbb/globalmount\"" pod="openstack/rabbitmq-server-0"
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.844610 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-488ac636-798e-4e54-8b90-72d85ab2610f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-488ac636-798e-4e54-8b90-72d85ab2610f\") pod \"rabbitmq-server-0\" (UID: \"4664279d-8b37-426a-a677-a3e982fb6beb\") " pod="openstack/rabbitmq-server-0"
Feb 16 11:37:32 crc kubenswrapper[4949]: I0216 11:37:32.957408 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Feb 16 11:37:33 crc kubenswrapper[4949]: I0216 11:37:33.253124 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2b4e8478-eec0-499f-a824-b0f07355e4f6" path="/var/lib/kubelet/pods/2b4e8478-eec0-499f-a824-b0f07355e4f6/volumes"
Feb 16 11:37:33 crc kubenswrapper[4949]: I0216 11:37:33.501923 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Feb 16 11:37:34 crc kubenswrapper[4949]: I0216 11:37:34.231242 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"4664279d-8b37-426a-a677-a3e982fb6beb","Type":"ContainerStarted","Data":"c8618f0f33b5f05fd443593efdcca7f4b0c0ac47959588ba806016f9a9e79f91"}
Feb 16 11:37:35 crc kubenswrapper[4949]: E0216 11:37:35.241010 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 11:37:36 crc kubenswrapper[4949]: I0216 11:37:36.254163 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"4664279d-8b37-426a-a677-a3e982fb6beb","Type":"ContainerStarted","Data":"3f847dac5fa1076030f68f7c9f374b0ec80caba859ec2856f6ed1ae08c6cdd1c"}
Feb 16 11:37:37 crc kubenswrapper[4949]: I0216 11:37:37.237833 4949 scope.go:117] "RemoveContainer" containerID="fcc25fdabb7245fb2b21fe157daaa0479c92c3c1e231adb091c93ee7fe6b8437"
Feb 16 11:37:37 crc kubenswrapper[4949]: E0216 11:37:37.238790 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b"
Feb 16 11:37:43 crc kubenswrapper[4949]: E0216 11:37:43.238840 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 11:37:48 crc kubenswrapper[4949]: I0216 11:37:48.237892 4949 scope.go:117] "RemoveContainer" containerID="fcc25fdabb7245fb2b21fe157daaa0479c92c3c1e231adb091c93ee7fe6b8437"
Feb 16 11:37:48 crc kubenswrapper[4949]: E0216 11:37:48.240647 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b"
Feb 16 11:37:48 crc kubenswrapper[4949]: E0216 11:37:48.242512 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 11:37:56 crc kubenswrapper[4949]: E0216 11:37:56.237475 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 11:38:01 crc kubenswrapper[4949]: I0216 11:38:01.236648 4949 scope.go:117] "RemoveContainer" containerID="fcc25fdabb7245fb2b21fe157daaa0479c92c3c1e231adb091c93ee7fe6b8437"
Feb 16 11:38:01 crc kubenswrapper[4949]: E0216 11:38:01.237482 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b"
Feb 16 11:38:01 crc kubenswrapper[4949]: E0216 11:38:01.243712 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
event={"ID":"4664279d-8b37-426a-a677-a3e982fb6beb","Type":"ContainerStarted","Data":"9711d6051de15b8c1ab5dff3a5b8af9551a111414e7ebc001e5a75096ac4add2"} Feb 16 11:38:09 crc kubenswrapper[4949]: I0216 11:38:09.703223 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Feb 16 11:38:09 crc kubenswrapper[4949]: I0216 11:38:09.738192 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=37.738149262 podStartE2EDuration="37.738149262s" podCreationTimestamp="2026-02-16 11:37:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:38:09.724334697 +0000 UTC m=+1879.353668872" watchObservedRunningTime="2026-02-16 11:38:09.738149262 +0000 UTC m=+1879.367483427" Feb 16 11:38:10 crc kubenswrapper[4949]: E0216 11:38:10.726513 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 16 11:38:10 crc kubenswrapper[4949]: E0216 11:38:10.726587 4949 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 16 11:38:10 crc kubenswrapper[4949]: E0216 11:38:10.726724 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ksbml,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-5lgds_openstack(a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 16 11:38:10 crc kubenswrapper[4949]: E0216 11:38:10.728744 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:38:14 crc kubenswrapper[4949]: E0216 11:38:14.358475 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 16 11:38:14 crc kubenswrapper[4949]: E0216 11:38:14.359270 4949 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 16 11:38:14 crc kubenswrapper[4949]: E0216 11:38:14.359400 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n674h5dh7bh65bhcch65chc4h547h5d4h5c7h5dch5c8h74hb9h5f4hd8h79h7h59bh559h56bh9bhbch67bh68bh575h5cbh658h5bch7bhcch5d9q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8k7p7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 
/var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(c69d7379-6f2b-45ae-8972-71e223a337a8): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 16 11:38:14 crc kubenswrapper[4949]: E0216 11:38:14.360584 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:38:16 crc kubenswrapper[4949]: I0216 11:38:16.235849 4949 scope.go:117] "RemoveContainer" containerID="fcc25fdabb7245fb2b21fe157daaa0479c92c3c1e231adb091c93ee7fe6b8437" Feb 16 11:38:16 crc kubenswrapper[4949]: E0216 11:38:16.236415 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:38:22 crc kubenswrapper[4949]: I0216 11:38:22.961220 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Feb 16 11:38:25 crc kubenswrapper[4949]: E0216 11:38:25.240445 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:38:26 crc kubenswrapper[4949]: E0216 11:38:26.236791 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:38:31 crc kubenswrapper[4949]: I0216 
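Both ErrImagePull failures above report the same root cause: the current-tested tag was deleted from the registry, so pulls fail at manifest resolution. One quick way to confirm that independently of the kubelet is to ask the registry's V2 API for the tag's manifest; a 404 means the tag is gone. A sketch assuming the registry permits anonymous reads (some registries require a bearer token first):

```go
// Mirror the failing manifest lookup via the Docker Registry V2 API.
// Assumes anonymous read access to quay.rdoproject.org; a 404 response
// confirms the "Tag current-tested was deleted or has expired" error.
package main

import (
	"fmt"
	"net/http"
)

func main() {
	url := "https://quay.rdoproject.org/v2/podified-master-centos10/" +
		"openstack-heat-engine/manifests/current-tested"
	req, err := http.NewRequest(http.MethodHead, url, nil)
	if err != nil {
		fmt.Println("bad request:", err)
		return
	}
	req.Header.Set("Accept", "application/vnd.oci.image.manifest.v1+json")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status) // expect 404 Not Found for a deleted tag
}
```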
11:38:31.239444 4949 scope.go:117] "RemoveContainer" containerID="fcc25fdabb7245fb2b21fe157daaa0479c92c3c1e231adb091c93ee7fe6b8437" Feb 16 11:38:31 crc kubenswrapper[4949]: E0216 11:38:31.240493 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:38:39 crc kubenswrapper[4949]: E0216 11:38:39.239373 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:38:39 crc kubenswrapper[4949]: E0216 11:38:39.239947 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:38:43 crc kubenswrapper[4949]: I0216 11:38:43.236779 4949 scope.go:117] "RemoveContainer" containerID="fcc25fdabb7245fb2b21fe157daaa0479c92c3c1e231adb091c93ee7fe6b8437" Feb 16 11:38:43 crc kubenswrapper[4949]: E0216 11:38:43.237841 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:38:52 crc kubenswrapper[4949]: E0216 11:38:52.238289 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:38:54 crc kubenswrapper[4949]: E0216 11:38:54.245335 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:38:58 crc kubenswrapper[4949]: I0216 11:38:58.236459 4949 scope.go:117] "RemoveContainer" containerID="fcc25fdabb7245fb2b21fe157daaa0479c92c3c1e231adb091c93ee7fe6b8437" Feb 16 11:38:58 crc kubenswrapper[4949]: E0216 11:38:58.237325 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" 
podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:39:06 crc kubenswrapper[4949]: E0216 11:39:06.240399 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:39:09 crc kubenswrapper[4949]: E0216 11:39:09.237135 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:39:12 crc kubenswrapper[4949]: I0216 11:39:12.236493 4949 scope.go:117] "RemoveContainer" containerID="fcc25fdabb7245fb2b21fe157daaa0479c92c3c1e231adb091c93ee7fe6b8437" Feb 16 11:39:12 crc kubenswrapper[4949]: I0216 11:39:12.571935 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" event={"ID":"39ca5ab7-457c-4404-a3eb-f6acce74843b","Type":"ContainerStarted","Data":"096a296a43c90ecf253548b6f73536cd24c074d6cf2f0b80b69fc989695ff765"} Feb 16 11:39:18 crc kubenswrapper[4949]: E0216 11:39:18.240791 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:39:20 crc kubenswrapper[4949]: I0216 11:39:20.045855 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-29db-account-create-update-sk7mb"] Feb 16 11:39:20 crc kubenswrapper[4949]: I0216 11:39:20.086771 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-29db-account-create-update-sk7mb"] Feb 16 11:39:21 crc kubenswrapper[4949]: I0216 11:39:21.040410 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-mfr7w"] Feb 16 11:39:21 crc kubenswrapper[4949]: I0216 11:39:21.056669 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-p2zsr"] Feb 16 11:39:21 crc kubenswrapper[4949]: I0216 11:39:21.069243 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-1a86-account-create-update-2sw8c"] Feb 16 11:39:21 crc kubenswrapper[4949]: I0216 11:39:21.083345 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-vmnzk"] Feb 16 11:39:21 crc kubenswrapper[4949]: I0216 11:39:21.097380 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-p2zsr"] Feb 16 11:39:21 crc kubenswrapper[4949]: I0216 11:39:21.109663 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-mfr7w"] Feb 16 11:39:21 crc kubenswrapper[4949]: I0216 11:39:21.122345 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-w6p2b"] Feb 16 11:39:21 crc kubenswrapper[4949]: I0216 11:39:21.135973 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-w6p2b"] Feb 16 11:39:21 crc kubenswrapper[4949]: I0216 11:39:21.149129 4949 kubelet.go:2431] "SyncLoop REMOVE" 
source="api" pods=["openstack/glance-1a86-account-create-update-2sw8c"] Feb 16 11:39:21 crc kubenswrapper[4949]: I0216 11:39:21.162684 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-vmnzk"] Feb 16 11:39:21 crc kubenswrapper[4949]: I0216 11:39:21.175772 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-ecac-account-create-update-kqvjh"] Feb 16 11:39:21 crc kubenswrapper[4949]: I0216 11:39:21.188580 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-43c4-account-create-update-jnnqw"] Feb 16 11:39:21 crc kubenswrapper[4949]: I0216 11:39:21.199495 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-ecac-account-create-update-kqvjh"] Feb 16 11:39:21 crc kubenswrapper[4949]: I0216 11:39:21.210645 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-43c4-account-create-update-jnnqw"] Feb 16 11:39:21 crc kubenswrapper[4949]: I0216 11:39:21.253427 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0686a1d4-34d8-48ed-8e05-14f6e2b65462" path="/var/lib/kubelet/pods/0686a1d4-34d8-48ed-8e05-14f6e2b65462/volumes" Feb 16 11:39:21 crc kubenswrapper[4949]: I0216 11:39:21.255565 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0fbeb10d-7447-462b-8a2b-b68d715a7d9e" path="/var/lib/kubelet/pods/0fbeb10d-7447-462b-8a2b-b68d715a7d9e/volumes" Feb 16 11:39:21 crc kubenswrapper[4949]: I0216 11:39:21.256663 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25f93b07-c059-4191-9ad3-7cb42412b2fb" path="/var/lib/kubelet/pods/25f93b07-c059-4191-9ad3-7cb42412b2fb/volumes" Feb 16 11:39:21 crc kubenswrapper[4949]: I0216 11:39:21.258186 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7118450d-767c-4539-801a-2ab0b5715487" path="/var/lib/kubelet/pods/7118450d-767c-4539-801a-2ab0b5715487/volumes" Feb 16 11:39:21 crc kubenswrapper[4949]: I0216 11:39:21.259366 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8068ac9c-ae28-47f4-af20-1565bb98c28f" path="/var/lib/kubelet/pods/8068ac9c-ae28-47f4-af20-1565bb98c28f/volumes" Feb 16 11:39:21 crc kubenswrapper[4949]: I0216 11:39:21.262322 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="99deca80-3672-484c-9902-878b7a51ac6a" path="/var/lib/kubelet/pods/99deca80-3672-484c-9902-878b7a51ac6a/volumes" Feb 16 11:39:21 crc kubenswrapper[4949]: I0216 11:39:21.263273 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9e3f4b81-c950-440a-a85a-6769c6affd08" path="/var/lib/kubelet/pods/9e3f4b81-c950-440a-a85a-6769c6affd08/volumes" Feb 16 11:39:21 crc kubenswrapper[4949]: I0216 11:39:21.263951 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d81d6ce5-367a-40d2-af0c-c2e88c48bc4e" path="/var/lib/kubelet/pods/d81d6ce5-367a-40d2-af0c-c2e88c48bc4e/volumes" Feb 16 11:39:22 crc kubenswrapper[4949]: E0216 11:39:22.239329 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:39:29 crc kubenswrapper[4949]: I0216 11:39:29.883909 4949 generic.go:334] "Generic (PLEG): container finished" podID="f26a3c47-2f6f-481f-b344-964ee178b1d8" 
containerID="3d9c7c8b798d53fddf4ef4320eec5eadaa44be3bd927eb440e76cd4eaf2a8d0c" exitCode=0 Feb 16 11:39:29 crc kubenswrapper[4949]: I0216 11:39:29.884069 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qdqw2" event={"ID":"f26a3c47-2f6f-481f-b344-964ee178b1d8","Type":"ContainerDied","Data":"3d9c7c8b798d53fddf4ef4320eec5eadaa44be3bd927eb440e76cd4eaf2a8d0c"} Feb 16 11:39:31 crc kubenswrapper[4949]: I0216 11:39:31.430797 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qdqw2" Feb 16 11:39:31 crc kubenswrapper[4949]: I0216 11:39:31.592314 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f26a3c47-2f6f-481f-b344-964ee178b1d8-inventory\") pod \"f26a3c47-2f6f-481f-b344-964ee178b1d8\" (UID: \"f26a3c47-2f6f-481f-b344-964ee178b1d8\") " Feb 16 11:39:31 crc kubenswrapper[4949]: I0216 11:39:31.593526 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9lffg\" (UniqueName: \"kubernetes.io/projected/f26a3c47-2f6f-481f-b344-964ee178b1d8-kube-api-access-9lffg\") pod \"f26a3c47-2f6f-481f-b344-964ee178b1d8\" (UID: \"f26a3c47-2f6f-481f-b344-964ee178b1d8\") " Feb 16 11:39:31 crc kubenswrapper[4949]: I0216 11:39:31.593905 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/f26a3c47-2f6f-481f-b344-964ee178b1d8-ssh-key-openstack-edpm-ipam\") pod \"f26a3c47-2f6f-481f-b344-964ee178b1d8\" (UID: \"f26a3c47-2f6f-481f-b344-964ee178b1d8\") " Feb 16 11:39:31 crc kubenswrapper[4949]: I0216 11:39:31.594018 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f26a3c47-2f6f-481f-b344-964ee178b1d8-bootstrap-combined-ca-bundle\") pod \"f26a3c47-2f6f-481f-b344-964ee178b1d8\" (UID: \"f26a3c47-2f6f-481f-b344-964ee178b1d8\") " Feb 16 11:39:31 crc kubenswrapper[4949]: I0216 11:39:31.600012 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f26a3c47-2f6f-481f-b344-964ee178b1d8-kube-api-access-9lffg" (OuterVolumeSpecName: "kube-api-access-9lffg") pod "f26a3c47-2f6f-481f-b344-964ee178b1d8" (UID: "f26a3c47-2f6f-481f-b344-964ee178b1d8"). InnerVolumeSpecName "kube-api-access-9lffg". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:39:31 crc kubenswrapper[4949]: I0216 11:39:31.600375 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f26a3c47-2f6f-481f-b344-964ee178b1d8-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "f26a3c47-2f6f-481f-b344-964ee178b1d8" (UID: "f26a3c47-2f6f-481f-b344-964ee178b1d8"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:39:31 crc kubenswrapper[4949]: I0216 11:39:31.629573 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f26a3c47-2f6f-481f-b344-964ee178b1d8-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "f26a3c47-2f6f-481f-b344-964ee178b1d8" (UID: "f26a3c47-2f6f-481f-b344-964ee178b1d8"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:39:31 crc kubenswrapper[4949]: I0216 11:39:31.641408 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f26a3c47-2f6f-481f-b344-964ee178b1d8-inventory" (OuterVolumeSpecName: "inventory") pod "f26a3c47-2f6f-481f-b344-964ee178b1d8" (UID: "f26a3c47-2f6f-481f-b344-964ee178b1d8"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:39:31 crc kubenswrapper[4949]: I0216 11:39:31.698713 4949 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f26a3c47-2f6f-481f-b344-964ee178b1d8-inventory\") on node \"crc\" DevicePath \"\"" Feb 16 11:39:31 crc kubenswrapper[4949]: I0216 11:39:31.698752 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9lffg\" (UniqueName: \"kubernetes.io/projected/f26a3c47-2f6f-481f-b344-964ee178b1d8-kube-api-access-9lffg\") on node \"crc\" DevicePath \"\"" Feb 16 11:39:31 crc kubenswrapper[4949]: I0216 11:39:31.698765 4949 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/f26a3c47-2f6f-481f-b344-964ee178b1d8-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Feb 16 11:39:31 crc kubenswrapper[4949]: I0216 11:39:31.698778 4949 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f26a3c47-2f6f-481f-b344-964ee178b1d8-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 16 11:39:31 crc kubenswrapper[4949]: I0216 11:39:31.913384 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qdqw2" event={"ID":"f26a3c47-2f6f-481f-b344-964ee178b1d8","Type":"ContainerDied","Data":"deeda77b65c5562dfe7c4181050dfc00e4917e1083de340a2578be41d9693828"} Feb 16 11:39:31 crc kubenswrapper[4949]: I0216 11:39:31.913722 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="deeda77b65c5562dfe7c4181050dfc00e4917e1083de340a2578be41d9693828" Feb 16 11:39:31 crc kubenswrapper[4949]: I0216 11:39:31.913475 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qdqw2" Feb 16 11:39:32 crc kubenswrapper[4949]: I0216 11:39:32.040918 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-r4kfr"] Feb 16 11:39:32 crc kubenswrapper[4949]: E0216 11:39:32.049515 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f26a3c47-2f6f-481f-b344-964ee178b1d8" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Feb 16 11:39:32 crc kubenswrapper[4949]: I0216 11:39:32.049560 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="f26a3c47-2f6f-481f-b344-964ee178b1d8" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Feb 16 11:39:32 crc kubenswrapper[4949]: I0216 11:39:32.050139 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="f26a3c47-2f6f-481f-b344-964ee178b1d8" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Feb 16 11:39:32 crc kubenswrapper[4949]: I0216 11:39:32.051529 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-r4kfr"] Feb 16 11:39:32 crc kubenswrapper[4949]: I0216 11:39:32.053125 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-r4kfr" Feb 16 11:39:32 crc kubenswrapper[4949]: I0216 11:39:32.055889 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Feb 16 11:39:32 crc kubenswrapper[4949]: I0216 11:39:32.056492 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Feb 16 11:39:32 crc kubenswrapper[4949]: I0216 11:39:32.056692 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-g89vn" Feb 16 11:39:32 crc kubenswrapper[4949]: I0216 11:39:32.056876 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Feb 16 11:39:32 crc kubenswrapper[4949]: I0216 11:39:32.121455 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ztvzv\" (UniqueName: \"kubernetes.io/projected/1e96f01e-1b19-4190-9109-75322770d9ba-kube-api-access-ztvzv\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-r4kfr\" (UID: \"1e96f01e-1b19-4190-9109-75322770d9ba\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-r4kfr" Feb 16 11:39:32 crc kubenswrapper[4949]: I0216 11:39:32.121513 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1e96f01e-1b19-4190-9109-75322770d9ba-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-r4kfr\" (UID: \"1e96f01e-1b19-4190-9109-75322770d9ba\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-r4kfr" Feb 16 11:39:32 crc kubenswrapper[4949]: I0216 11:39:32.121600 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1e96f01e-1b19-4190-9109-75322770d9ba-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-r4kfr\" (UID: \"1e96f01e-1b19-4190-9109-75322770d9ba\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-r4kfr" Feb 16 11:39:32 crc kubenswrapper[4949]: I0216 11:39:32.224313 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1e96f01e-1b19-4190-9109-75322770d9ba-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-r4kfr\" (UID: \"1e96f01e-1b19-4190-9109-75322770d9ba\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-r4kfr" Feb 16 11:39:32 crc kubenswrapper[4949]: I0216 11:39:32.224547 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ztvzv\" (UniqueName: \"kubernetes.io/projected/1e96f01e-1b19-4190-9109-75322770d9ba-kube-api-access-ztvzv\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-r4kfr\" (UID: \"1e96f01e-1b19-4190-9109-75322770d9ba\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-r4kfr" Feb 16 11:39:32 crc kubenswrapper[4949]: I0216 11:39:32.224584 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1e96f01e-1b19-4190-9109-75322770d9ba-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-r4kfr\" (UID: \"1e96f01e-1b19-4190-9109-75322770d9ba\") " 
pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-r4kfr" Feb 16 11:39:32 crc kubenswrapper[4949]: I0216 11:39:32.229769 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1e96f01e-1b19-4190-9109-75322770d9ba-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-r4kfr\" (UID: \"1e96f01e-1b19-4190-9109-75322770d9ba\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-r4kfr" Feb 16 11:39:32 crc kubenswrapper[4949]: I0216 11:39:32.231423 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1e96f01e-1b19-4190-9109-75322770d9ba-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-r4kfr\" (UID: \"1e96f01e-1b19-4190-9109-75322770d9ba\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-r4kfr" Feb 16 11:39:32 crc kubenswrapper[4949]: I0216 11:39:32.244377 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ztvzv\" (UniqueName: \"kubernetes.io/projected/1e96f01e-1b19-4190-9109-75322770d9ba-kube-api-access-ztvzv\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-r4kfr\" (UID: \"1e96f01e-1b19-4190-9109-75322770d9ba\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-r4kfr" Feb 16 11:39:32 crc kubenswrapper[4949]: I0216 11:39:32.383743 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-r4kfr" Feb 16 11:39:32 crc kubenswrapper[4949]: I0216 11:39:32.979646 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-r4kfr"] Feb 16 11:39:32 crc kubenswrapper[4949]: I0216 11:39:32.984964 4949 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 16 11:39:33 crc kubenswrapper[4949]: I0216 11:39:33.060341 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-08b3-account-create-update-69t4v"] Feb 16 11:39:33 crc kubenswrapper[4949]: I0216 11:39:33.076211 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-4q8jw"] Feb 16 11:39:33 crc kubenswrapper[4949]: I0216 11:39:33.092381 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-mx7hg"] Feb 16 11:39:33 crc kubenswrapper[4949]: I0216 11:39:33.108969 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-08b3-account-create-update-69t4v"] Feb 16 11:39:33 crc kubenswrapper[4949]: I0216 11:39:33.123941 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-mx7hg"] Feb 16 11:39:33 crc kubenswrapper[4949]: I0216 11:39:33.136769 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-4q8jw"] Feb 16 11:39:33 crc kubenswrapper[4949]: E0216 11:39:33.238662 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:39:33 crc kubenswrapper[4949]: I0216 11:39:33.250948 4949 kubelet_volumes.go:163] "Cleaned up orphaned 
pod volumes dir" podUID="553663f2-42ac-4491-affa-8a15ca2e093b" path="/var/lib/kubelet/pods/553663f2-42ac-4491-affa-8a15ca2e093b/volumes" Feb 16 11:39:33 crc kubenswrapper[4949]: I0216 11:39:33.251612 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8715bb56-ef48-4c95-8d9d-85bdefe22a3e" path="/var/lib/kubelet/pods/8715bb56-ef48-4c95-8d9d-85bdefe22a3e/volumes" Feb 16 11:39:33 crc kubenswrapper[4949]: I0216 11:39:33.253288 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ee2937a9-54c6-43b0-8fbd-5a83e3fd8da2" path="/var/lib/kubelet/pods/ee2937a9-54c6-43b0-8fbd-5a83e3fd8da2/volumes" Feb 16 11:39:33 crc kubenswrapper[4949]: I0216 11:39:33.947202 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-r4kfr" event={"ID":"1e96f01e-1b19-4190-9109-75322770d9ba","Type":"ContainerStarted","Data":"257dc4ac92be4a672883f0d062443f3d1aeedd408c80e83b1d9074fe40dc999c"} Feb 16 11:39:33 crc kubenswrapper[4949]: I0216 11:39:33.947509 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-r4kfr" event={"ID":"1e96f01e-1b19-4190-9109-75322770d9ba","Type":"ContainerStarted","Data":"d46e0c06b6805b71fed5880f4413c9de8516ce80cec348269db37f6b4da9c969"} Feb 16 11:39:33 crc kubenswrapper[4949]: I0216 11:39:33.973292 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-r4kfr" podStartSLOduration=2.558964594 podStartE2EDuration="2.973269999s" podCreationTimestamp="2026-02-16 11:39:31 +0000 UTC" firstStartedPulling="2026-02-16 11:39:32.98473537 +0000 UTC m=+1962.614069525" lastFinishedPulling="2026-02-16 11:39:33.399040745 +0000 UTC m=+1963.028374930" observedRunningTime="2026-02-16 11:39:33.964945661 +0000 UTC m=+1963.594279826" watchObservedRunningTime="2026-02-16 11:39:33.973269999 +0000 UTC m=+1963.602604164" Feb 16 11:39:36 crc kubenswrapper[4949]: E0216 11:39:36.237676 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:39:46 crc kubenswrapper[4949]: E0216 11:39:46.241157 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:39:51 crc kubenswrapper[4949]: E0216 11:39:51.263625 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:39:55 crc kubenswrapper[4949]: I0216 11:39:55.473862 4949 scope.go:117] "RemoveContainer" containerID="20b29a2ba2f47c347276d6283714b343e860327877ad66350877cf5e1d0d668f" Feb 16 11:39:55 crc kubenswrapper[4949]: I0216 11:39:55.505796 4949 scope.go:117] "RemoveContainer" containerID="211324f1a27603e7c9df9b512272f9fd3a3fd9f4fbea9ae533f065a06813c2b8" Feb 16 
Feb 16 11:39:36 crc kubenswrapper[4949]: E0216 11:39:36.237676 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 11:39:46 crc kubenswrapper[4949]: E0216 11:39:46.241157 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 11:39:51 crc kubenswrapper[4949]: E0216 11:39:51.263625 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 11:39:55 crc kubenswrapper[4949]: I0216 11:39:55.473862 4949 scope.go:117] "RemoveContainer" containerID="20b29a2ba2f47c347276d6283714b343e860327877ad66350877cf5e1d0d668f"
Feb 16 11:39:55 crc kubenswrapper[4949]: I0216 11:39:55.505796 4949 scope.go:117] "RemoveContainer" containerID="211324f1a27603e7c9df9b512272f9fd3a3fd9f4fbea9ae533f065a06813c2b8"
Feb 16 11:39:55 crc kubenswrapper[4949]: I0216 11:39:55.565256 4949 scope.go:117] "RemoveContainer" containerID="5114ef3c7b87627e086929c0b1327247fed05cb3e29cca01a258ba0da9cdabc9"
Feb 16 11:39:55 crc kubenswrapper[4949]: I0216 11:39:55.620986 4949 scope.go:117] "RemoveContainer" containerID="46bef3d44c1e5dd8b60fc72b6cafe5174aac8067729859e639452edd51a24d2e"
Feb 16 11:39:55 crc kubenswrapper[4949]: I0216 11:39:55.678770 4949 scope.go:117] "RemoveContainer" containerID="779fd297f1a7b349d9a9cf2f7bf3b8e3116c795a103dfadb6d89d0424ef52cd1"
Feb 16 11:39:55 crc kubenswrapper[4949]: I0216 11:39:55.746485 4949 scope.go:117] "RemoveContainer" containerID="361c9940063923e9aed905955978713e646bee3372d7ee732af6a0fe7cffdf45"
Feb 16 11:39:55 crc kubenswrapper[4949]: I0216 11:39:55.815066 4949 scope.go:117] "RemoveContainer" containerID="6ce5db53543acedd8939b98845a0bd6fad4f7b3ec6a77cad1fd22ce42423f158"
Feb 16 11:39:55 crc kubenswrapper[4949]: I0216 11:39:55.845686 4949 scope.go:117] "RemoveContainer" containerID="679e18c9c9c39e98223301f119cf67e2b3940c45f32212ab957d447336da2b17"
Feb 16 11:39:55 crc kubenswrapper[4949]: I0216 11:39:55.875208 4949 scope.go:117] "RemoveContainer" containerID="d6fd0474a0143aa44ea872486520519acb6d1eda44e8edd66128de36702edd56"
Feb 16 11:39:55 crc kubenswrapper[4949]: I0216 11:39:55.900273 4949 scope.go:117] "RemoveContainer" containerID="97cc4c6dd1924f33b67aad9a58754432b2191458b979b128389e8f15c7ab0572"
Feb 16 11:39:55 crc kubenswrapper[4949]: I0216 11:39:55.926311 4949 scope.go:117] "RemoveContainer" containerID="ef2dd86f5dc586e7f1d85040d39b311aa3092def01e38437226c686f54dbb323"
Feb 16 11:39:57 crc kubenswrapper[4949]: E0216 11:39:57.238144 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 11:39:58 crc kubenswrapper[4949]: I0216 11:39:58.061202 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-lwh9x"]
Feb 16 11:39:58 crc kubenswrapper[4949]: I0216 11:39:58.076235 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-lwh9x"]
Feb 16 11:39:59 crc kubenswrapper[4949]: I0216 11:39:59.251241 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ffc1202a-a99b-4190-984b-511f9d345832" path="/var/lib/kubelet/pods/ffc1202a-a99b-4190-984b-511f9d345832/volumes"
Feb 16 11:40:02 crc kubenswrapper[4949]: E0216 11:40:02.238131 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 11:40:08 crc kubenswrapper[4949]: I0216 11:40:08.039234 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-npkkr"]
Feb 16 11:40:08 crc kubenswrapper[4949]: I0216 11:40:08.056401 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-npkkr"]
Feb 16 11:40:09 crc kubenswrapper[4949]: I0216 11:40:09.258971 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ada0c1eb-48c6-4d78-b57f-b65979ffbaa4" path="/var/lib/kubelet/pods/ada0c1eb-48c6-4d78-b57f-b65979ffbaa4/volumes"
Feb 16 11:40:11 crc kubenswrapper[4949]: I0216 11:40:11.050486 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-d46b-account-create-update-2f9k8"]
Feb 16 11:40:11 crc kubenswrapper[4949]: I0216 11:40:11.067134 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-create-xg9xl"]
Feb 16 11:40:11 crc kubenswrapper[4949]: I0216 11:40:11.089884 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-7ff2-account-create-update-c6fg9"]
Feb 16 11:40:11 crc kubenswrapper[4949]: I0216 11:40:11.101928 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-7e44-account-create-update-pzvfn"]
Feb 16 11:40:11 crc kubenswrapper[4949]: I0216 11:40:11.112222 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-d46b-account-create-update-2f9k8"]
Feb 16 11:40:11 crc kubenswrapper[4949]: I0216 11:40:11.123396 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-7ff2-account-create-update-c6fg9"]
Feb 16 11:40:11 crc kubenswrapper[4949]: I0216 11:40:11.136413 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-create-xg9xl"]
Feb 16 11:40:11 crc kubenswrapper[4949]: I0216 11:40:11.150982 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-7e44-account-create-update-pzvfn"]
Feb 16 11:40:11 crc kubenswrapper[4949]: I0216 11:40:11.162614 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-f4ctx"]
Feb 16 11:40:11 crc kubenswrapper[4949]: I0216 11:40:11.172804 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-f4ctx"]
Feb 16 11:40:11 crc kubenswrapper[4949]: I0216 11:40:11.182839 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-6n2w7"]
Feb 16 11:40:11 crc kubenswrapper[4949]: I0216 11:40:11.192277 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-6n2w7"]
Feb 16 11:40:11 crc kubenswrapper[4949]: I0216 11:40:11.213408 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-0db9-account-create-update-tt5pm"]
Feb 16 11:40:11 crc kubenswrapper[4949]: I0216 11:40:11.232798 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-0db9-account-create-update-tt5pm"]
Feb 16 11:40:11 crc kubenswrapper[4949]: E0216 11:40:11.246089 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 11:40:11 crc kubenswrapper[4949]: I0216 11:40:11.262216 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43ddf580-ace8-4f9c-b767-12a5514ac753" path="/var/lib/kubelet/pods/43ddf580-ace8-4f9c-b767-12a5514ac753/volumes"
Feb 16 11:40:11 crc kubenswrapper[4949]: I0216 11:40:11.265846 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="79fc5ddc-25fd-4b68-957d-ebe934bc9388" path="/var/lib/kubelet/pods/79fc5ddc-25fd-4b68-957d-ebe934bc9388/volumes"
Feb 16 11:40:11 crc kubenswrapper[4949]: I0216 11:40:11.267072 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="81ef1e0f-3024-4742-952e-44fe02054f6d" path="/var/lib/kubelet/pods/81ef1e0f-3024-4742-952e-44fe02054f6d/volumes"
Feb 16 11:40:11 crc kubenswrapper[4949]: I0216 11:40:11.269000 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b74df024-1e28-43d0-b364-9cab63fda88b" path="/var/lib/kubelet/pods/b74df024-1e28-43d0-b364-9cab63fda88b/volumes"
Feb 16 11:40:11 crc kubenswrapper[4949]: I0216 11:40:11.271712 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c4546872-04ea-4396-acf2-380838b528d0" path="/var/lib/kubelet/pods/c4546872-04ea-4396-acf2-380838b528d0/volumes"
Feb 16 11:40:11 crc kubenswrapper[4949]: I0216 11:40:11.274045 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c899f773-b326-4b03-8fbb-87e97cafd63b" path="/var/lib/kubelet/pods/c899f773-b326-4b03-8fbb-87e97cafd63b/volumes"
Feb 16 11:40:11 crc kubenswrapper[4949]: I0216 11:40:11.275480 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d5f53b06-283f-465d-93ba-4366a5e2147c" path="/var/lib/kubelet/pods/d5f53b06-283f-465d-93ba-4366a5e2147c/volumes"
Feb 16 11:40:13 crc kubenswrapper[4949]: E0216 11:40:13.239940 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 11:40:16 crc kubenswrapper[4949]: I0216 11:40:16.040492 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-nv9nl"]
Feb 16 11:40:16 crc kubenswrapper[4949]: I0216 11:40:16.056890 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-nv9nl"]
Feb 16 11:40:17 crc kubenswrapper[4949]: I0216 11:40:17.297325 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="63e31d0f-ee9a-49ae-8699-2915a121ac10" path="/var/lib/kubelet/pods/63e31d0f-ee9a-49ae-8699-2915a121ac10/volumes"
Feb 16 11:40:22 crc kubenswrapper[4949]: E0216 11:40:22.240566 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 11:40:26 crc kubenswrapper[4949]: E0216 11:40:26.238746 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 11:40:35 crc kubenswrapper[4949]: E0216 11:40:35.238133 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 11:40:40 crc kubenswrapper[4949]: E0216 11:40:40.237967 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
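Note: the "Back-off pulling image" errors for heat-db-sync and ceilometer-0 recur every 10-15 s above; those are pod sync attempts being rejected while the image pull backoff timer is still running, not fresh pulls (the next real pull attempts surface at 11:41:03-04 below). Kubelet backs off image pulls exponentially, by default starting at 10 s and doubling to a 5 min cap; a sketch of that schedule (illustrative only, not kubelet's code):

# Illustrative kubelet-style image pull backoff: 10 s initial delay,
# doubling after each failed pull, capped at 5 min (the defaults).
def backoff_schedule(initial=10.0, factor=2.0, cap=300.0, attempts=7):
    delay = initial
    for attempt in range(1, attempts + 1):
        yield attempt, delay
        delay = min(delay * factor, cap)

for attempt, delay in backoff_schedule():
    print(f"pull attempt {attempt} failed -> wait {delay:.0f}s before retrying")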
Feb 16 11:40:46 crc kubenswrapper[4949]: I0216 11:40:46.055355 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-t8dz8"]
Feb 16 11:40:46 crc kubenswrapper[4949]: I0216 11:40:46.079214 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-t8dz8"]
Feb 16 11:40:47 crc kubenswrapper[4949]: I0216 11:40:47.261964 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="527d49b7-fb82-4e46-b608-839a2fce0f60" path="/var/lib/kubelet/pods/527d49b7-fb82-4e46-b608-839a2fce0f60/volumes"
Feb 16 11:40:50 crc kubenswrapper[4949]: E0216 11:40:50.238847 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 11:40:51 crc kubenswrapper[4949]: E0216 11:40:51.256619 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 11:40:52 crc kubenswrapper[4949]: I0216 11:40:52.055412 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-n7hw7"]
Feb 16 11:40:52 crc kubenswrapper[4949]: I0216 11:40:52.067592 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-n7hw7"]
Feb 16 11:40:53 crc kubenswrapper[4949]: I0216 11:40:53.254213 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="18a2cf62-0669-4033-ba7f-c69805fa9c3a" path="/var/lib/kubelet/pods/18a2cf62-0669-4033-ba7f-c69805fa9c3a/volumes"
Feb 16 11:40:56 crc kubenswrapper[4949]: I0216 11:40:56.169424 4949 scope.go:117] "RemoveContainer" containerID="4f5987cfcee0d4eaf5bca066f66f2d2dedb3e4f7ee4ea5634e972dd0d04cb774"
Feb 16 11:40:56 crc kubenswrapper[4949]: I0216 11:40:56.193081 4949 scope.go:117] "RemoveContainer" containerID="a845ea160e9fc1fe6041b77fba350f61cef7f868015422628c23c23f4119b2b2"
Feb 16 11:40:56 crc kubenswrapper[4949]: I0216 11:40:56.225297 4949 scope.go:117] "RemoveContainer" containerID="2d001d2a33a227777237ae83b5242609605a65737e02593781ebf3e648575fc3"
Feb 16 11:40:56 crc kubenswrapper[4949]: I0216 11:40:56.278824 4949 scope.go:117] "RemoveContainer" containerID="8777fd92ca59578d672bca13a68eaf951c8a73f222708fec25ee0073f31a7b75"
Feb 16 11:40:56 crc kubenswrapper[4949]: I0216 11:40:56.323007 4949 scope.go:117] "RemoveContainer" containerID="69b1b8aa41d1642ee1aceb6f196fc1457ca4b10b446dd6f7d8b3ba14678525d7"
Feb 16 11:40:56 crc kubenswrapper[4949]: I0216 11:40:56.375047 4949 scope.go:117] "RemoveContainer" containerID="2a592931451d1d916cc868c5adb0cda14d9febbe3593b82f0f77a7787285225d"
Feb 16 11:40:56 crc kubenswrapper[4949]: I0216 11:40:56.434261 4949 scope.go:117] "RemoveContainer" containerID="2cbfdf823955255b3693e9c694a8753eea5f4d2737563c0051726e6d03631f36"
Feb 16 11:40:56 crc kubenswrapper[4949]: I0216 11:40:56.487538 4949 scope.go:117] "RemoveContainer" containerID="4f151eaf5b83281979d53a553d1543b6885a943977217f39f17da98cb7834023"
Feb 16 11:40:56 crc kubenswrapper[4949]: I0216 11:40:56.530601 4949 scope.go:117] "RemoveContainer" containerID="19c6e14200617dc051c50463cc0dc5e299b5f18d971cc76865078b394c97dbb1"
Feb 16 11:40:56 crc kubenswrapper[4949]: I0216 11:40:56.597842 4949 scope.go:117] "RemoveContainer" containerID="d53f54521d581e8e56bef68155f4463c0a42f007eb181629044e5f7281a7e206"
Feb 16 11:40:56 crc kubenswrapper[4949]: I0216 11:40:56.628665 4949 scope.go:117] "RemoveContainer" containerID="ad72adbb2a85f5f8cca6c83d9583be005af4aa81f79d42acea2e5144a50cfa82"
Feb 16 11:40:56 crc kubenswrapper[4949]: I0216 11:40:56.652053 4949 scope.go:117] "RemoveContainer" containerID="64fac376aa3bc1cdb5c65b3d6ff8d56eaf9880de805569a9547dc722adec71b9"
Feb 16 11:40:56 crc kubenswrapper[4949]: I0216 11:40:56.676774 4949 scope.go:117] "RemoveContainer" containerID="21ae4c45d7caa895caba01d65041e6decaba821786efb5f3701696edea47d790"
Feb 16 11:40:56 crc kubenswrapper[4949]: I0216 11:40:56.706671 4949 scope.go:117] "RemoveContainer" containerID="fcc8559383e59096b9f87048c867e8afa15226d55d9e68cffbfebb89c3962d0e"
Feb 16 11:40:56 crc kubenswrapper[4949]: I0216 11:40:56.735532 4949 scope.go:117] "RemoveContainer" containerID="d16675331c2b2e304b9fce68190aa8afa3f402b557759fa009a94792c139b38d"
Feb 16 11:40:56 crc kubenswrapper[4949]: I0216 11:40:56.764363 4949 scope.go:117] "RemoveContainer" containerID="8abc2f2ddd1dfa9f929835d468021b1acaf6cbfeb080fe16f591723d85b32a41"
Feb 16 11:40:58 crc kubenswrapper[4949]: I0216 11:40:58.039027 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-p69nz"]
Feb 16 11:40:58 crc kubenswrapper[4949]: I0216 11:40:58.055546 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-wbd7n"]
Feb 16 11:40:58 crc kubenswrapper[4949]: I0216 11:40:58.075799 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-wbd7n"]
Feb 16 11:40:58 crc kubenswrapper[4949]: I0216 11:40:58.088591 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-p69nz"]
Feb 16 11:40:59 crc kubenswrapper[4949]: I0216 11:40:59.254648 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="89c4baaa-f449-4fab-a513-1eec4a163af9" path="/var/lib/kubelet/pods/89c4baaa-f449-4fab-a513-1eec4a163af9/volumes"
Feb 16 11:40:59 crc kubenswrapper[4949]: I0216 11:40:59.256228 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f40c7714-8669-4c33-8b1d-e3be853ca911" path="/var/lib/kubelet/pods/f40c7714-8669-4c33-8b1d-e3be853ca911/volumes"
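Note: the "RemoveContainer" bursts at 11:39:55 and 11:40:56 above (and again at 11:41:57 below) are kubelet's container garbage collector, which runs on a one-minute period and deletes exited containers beyond the configured age and count limits. A toy model of the eviction policy (illustrative only; the knob names loosely follow the kubelet flags --minimum-container-ttl-duration and --maximum-dead-containers-per-container):

# Toy model of kubelet container GC: keep the newest N dead containers
# per pod, evict older ones once past a minimum age. Illustrative only.
from dataclasses import dataclass

@dataclass
class DeadContainer:
    pod: str
    age_s: float          # seconds since the container exited

def gc_candidates(dead, min_age_s=0.0, max_per_pod=1):
    by_pod = {}
    for c in sorted(dead, key=lambda c: c.age_s):   # newest first
        by_pod.setdefault(c.pod, []).append(c)
    evict = []
    for containers in by_pod.values():
        for c in containers[max_per_pod:]:          # beyond the per-pod limit
            if c.age_s >= min_age_s:
                evict.append(c)
    return evict

print(gc_candidates([DeadContainer("a", 30), DeadContainer("a", 300)]))
# evicts the 300 s-old container, keeps the most recent dead one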
Feb 16 11:41:03 crc kubenswrapper[4949]: E0216 11:41:03.365320 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested"
Feb 16 11:41:03 crc kubenswrapper[4949]: E0216 11:41:03.365879 4949 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested"
Feb 16 11:41:03 crc kubenswrapper[4949]: E0216 11:41:03.366024 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ksbml,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-5lgds_openstack(a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError"
Feb 16 11:41:03 crc kubenswrapper[4949]: E0216 11:41:03.368781 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 11:41:04 crc kubenswrapper[4949]: E0216 11:41:04.363651 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested"
Feb 16 11:41:04 crc kubenswrapper[4949]: E0216 11:41:04.363719 4949 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested"
Feb 16 11:41:04 crc kubenswrapper[4949]: E0216 11:41:04.363923 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n674h5dh7bh65bhcch65chc4h547h5d4h5c7h5dch5c8h74hb9h5f4hd8h79h7h59bh559h56bh9bhbch67bh68bh575h5cbh658h5bch7bhcch5d9q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8k7p7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(c69d7379-6f2b-45ae-8972-71e223a337a8): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError"
Feb 16 11:41:04 crc kubenswrapper[4949]: E0216 11:41:04.365521 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
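Note: both ErrImagePull failures above are registry-side, not node-side: quay.rdoproject.org reports that the current-tested tag "was deleted or has expired", so no amount of node retries can succeed until the tag is restored or the spec is repointed. The tag's absence can be confirmed from any host with registry access (a sketch; assumes skopeo is installed):

# Check whether the tag still exists on the registry. Assumes `skopeo`
# is installed; a missing tag reproduces the kubelet error above.
import subprocess

IMAGE = ("docker://quay.rdoproject.org/podified-master-centos10/"
         "openstack-heat-engine:current-tested")

result = subprocess.run(["skopeo", "inspect", IMAGE],
                        capture_output=True, text=True)
if result.returncode == 0:
    print("tag exists")
else:
    print("lookup failed:")
    print(result.stderr)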
Need to start a new one" pod="openshift-marketplace/redhat-operators-5bpcw" Feb 16 11:41:25 crc kubenswrapper[4949]: I0216 11:41:25.468338 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5bpcw"] Feb 16 11:41:25 crc kubenswrapper[4949]: I0216 11:41:25.576643 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b315acd2-3c0e-4629-822b-6681280acd91-utilities\") pod \"redhat-operators-5bpcw\" (UID: \"b315acd2-3c0e-4629-822b-6681280acd91\") " pod="openshift-marketplace/redhat-operators-5bpcw" Feb 16 11:41:25 crc kubenswrapper[4949]: I0216 11:41:25.576693 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b315acd2-3c0e-4629-822b-6681280acd91-catalog-content\") pod \"redhat-operators-5bpcw\" (UID: \"b315acd2-3c0e-4629-822b-6681280acd91\") " pod="openshift-marketplace/redhat-operators-5bpcw" Feb 16 11:41:25 crc kubenswrapper[4949]: I0216 11:41:25.576823 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cpwx8\" (UniqueName: \"kubernetes.io/projected/b315acd2-3c0e-4629-822b-6681280acd91-kube-api-access-cpwx8\") pod \"redhat-operators-5bpcw\" (UID: \"b315acd2-3c0e-4629-822b-6681280acd91\") " pod="openshift-marketplace/redhat-operators-5bpcw" Feb 16 11:41:25 crc kubenswrapper[4949]: I0216 11:41:25.680395 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b315acd2-3c0e-4629-822b-6681280acd91-utilities\") pod \"redhat-operators-5bpcw\" (UID: \"b315acd2-3c0e-4629-822b-6681280acd91\") " pod="openshift-marketplace/redhat-operators-5bpcw" Feb 16 11:41:25 crc kubenswrapper[4949]: I0216 11:41:25.680440 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b315acd2-3c0e-4629-822b-6681280acd91-catalog-content\") pod \"redhat-operators-5bpcw\" (UID: \"b315acd2-3c0e-4629-822b-6681280acd91\") " pod="openshift-marketplace/redhat-operators-5bpcw" Feb 16 11:41:25 crc kubenswrapper[4949]: I0216 11:41:25.680474 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cpwx8\" (UniqueName: \"kubernetes.io/projected/b315acd2-3c0e-4629-822b-6681280acd91-kube-api-access-cpwx8\") pod \"redhat-operators-5bpcw\" (UID: \"b315acd2-3c0e-4629-822b-6681280acd91\") " pod="openshift-marketplace/redhat-operators-5bpcw" Feb 16 11:41:25 crc kubenswrapper[4949]: I0216 11:41:25.681136 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b315acd2-3c0e-4629-822b-6681280acd91-utilities\") pod \"redhat-operators-5bpcw\" (UID: \"b315acd2-3c0e-4629-822b-6681280acd91\") " pod="openshift-marketplace/redhat-operators-5bpcw" Feb 16 11:41:25 crc kubenswrapper[4949]: I0216 11:41:25.681309 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b315acd2-3c0e-4629-822b-6681280acd91-catalog-content\") pod \"redhat-operators-5bpcw\" (UID: \"b315acd2-3c0e-4629-822b-6681280acd91\") " pod="openshift-marketplace/redhat-operators-5bpcw" Feb 16 11:41:25 crc kubenswrapper[4949]: I0216 11:41:25.702664 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-cpwx8\" (UniqueName: \"kubernetes.io/projected/b315acd2-3c0e-4629-822b-6681280acd91-kube-api-access-cpwx8\") pod \"redhat-operators-5bpcw\" (UID: \"b315acd2-3c0e-4629-822b-6681280acd91\") " pod="openshift-marketplace/redhat-operators-5bpcw" Feb 16 11:41:25 crc kubenswrapper[4949]: I0216 11:41:25.786304 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5bpcw" Feb 16 11:41:26 crc kubenswrapper[4949]: I0216 11:41:26.295930 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5bpcw"] Feb 16 11:41:26 crc kubenswrapper[4949]: I0216 11:41:26.521238 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5bpcw" event={"ID":"b315acd2-3c0e-4629-822b-6681280acd91","Type":"ContainerStarted","Data":"6a87f2442511e5d48e28a8e9eb6d40bd252c80c36cf717f16cd0a57c62b3cf41"} Feb 16 11:41:27 crc kubenswrapper[4949]: I0216 11:41:27.538981 4949 generic.go:334] "Generic (PLEG): container finished" podID="b315acd2-3c0e-4629-822b-6681280acd91" containerID="068cb3bd4bbaebaa0c57cffc35801c604cc9ee8e7b72749655d34eee6a7d6693" exitCode=0 Feb 16 11:41:27 crc kubenswrapper[4949]: I0216 11:41:27.539057 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5bpcw" event={"ID":"b315acd2-3c0e-4629-822b-6681280acd91","Type":"ContainerDied","Data":"068cb3bd4bbaebaa0c57cffc35801c604cc9ee8e7b72749655d34eee6a7d6693"} Feb 16 11:41:28 crc kubenswrapper[4949]: E0216 11:41:28.239703 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:41:29 crc kubenswrapper[4949]: E0216 11:41:29.239442 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:41:29 crc kubenswrapper[4949]: I0216 11:41:29.571386 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5bpcw" event={"ID":"b315acd2-3c0e-4629-822b-6681280acd91","Type":"ContainerStarted","Data":"73be1bbd688c2939e8e62da29a6a5881c63106aa7abc7f118d949e30f32a7c11"} Feb 16 11:41:33 crc kubenswrapper[4949]: I0216 11:41:33.627862 4949 generic.go:334] "Generic (PLEG): container finished" podID="b315acd2-3c0e-4629-822b-6681280acd91" containerID="73be1bbd688c2939e8e62da29a6a5881c63106aa7abc7f118d949e30f32a7c11" exitCode=0 Feb 16 11:41:33 crc kubenswrapper[4949]: I0216 11:41:33.627946 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5bpcw" event={"ID":"b315acd2-3c0e-4629-822b-6681280acd91","Type":"ContainerDied","Data":"73be1bbd688c2939e8e62da29a6a5881c63106aa7abc7f118d949e30f32a7c11"} Feb 16 11:41:34 crc kubenswrapper[4949]: I0216 11:41:34.550332 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" 
start-of-body= Feb 16 11:41:34 crc kubenswrapper[4949]: I0216 11:41:34.550611 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 16 11:41:34 crc kubenswrapper[4949]: I0216 11:41:34.643803 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5bpcw" event={"ID":"b315acd2-3c0e-4629-822b-6681280acd91","Type":"ContainerStarted","Data":"4cacdab969c94b773eca01be04d4098d315d05f6322d07703fbc62e1a67dbc85"} Feb 16 11:41:34 crc kubenswrapper[4949]: I0216 11:41:34.683815 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-5bpcw" podStartSLOduration=3.031460587 podStartE2EDuration="9.683783875s" podCreationTimestamp="2026-02-16 11:41:25 +0000 UTC" firstStartedPulling="2026-02-16 11:41:27.543962464 +0000 UTC m=+2077.173296659" lastFinishedPulling="2026-02-16 11:41:34.196285742 +0000 UTC m=+2083.825619947" observedRunningTime="2026-02-16 11:41:34.669254291 +0000 UTC m=+2084.298588496" watchObservedRunningTime="2026-02-16 11:41:34.683783875 +0000 UTC m=+2084.313118070" Feb 16 11:41:35 crc kubenswrapper[4949]: I0216 11:41:35.786563 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-5bpcw" Feb 16 11:41:35 crc kubenswrapper[4949]: I0216 11:41:35.786649 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-5bpcw" Feb 16 11:41:36 crc kubenswrapper[4949]: I0216 11:41:36.842735 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-5bpcw" podUID="b315acd2-3c0e-4629-822b-6681280acd91" containerName="registry-server" probeResult="failure" output=< Feb 16 11:41:36 crc kubenswrapper[4949]: timeout: failed to connect service ":50051" within 1s Feb 16 11:41:36 crc kubenswrapper[4949]: > Feb 16 11:41:39 crc kubenswrapper[4949]: E0216 11:41:39.240444 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:41:40 crc kubenswrapper[4949]: E0216 11:41:40.238709 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:41:46 crc kubenswrapper[4949]: I0216 11:41:46.840235 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-5bpcw" podUID="b315acd2-3c0e-4629-822b-6681280acd91" containerName="registry-server" probeResult="failure" output=< Feb 16 11:41:46 crc kubenswrapper[4949]: timeout: failed to connect service ":50051" within 1s Feb 16 11:41:46 crc kubenswrapper[4949]: > Feb 16 11:41:50 crc kubenswrapper[4949]: E0216 11:41:50.238195 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:41:51 crc kubenswrapper[4949]: E0216 11:41:51.297429 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:41:56 crc kubenswrapper[4949]: I0216 11:41:56.866544 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-5bpcw" podUID="b315acd2-3c0e-4629-822b-6681280acd91" containerName="registry-server" probeResult="failure" output=< Feb 16 11:41:56 crc kubenswrapper[4949]: timeout: failed to connect service ":50051" within 1s Feb 16 11:41:56 crc kubenswrapper[4949]: > Feb 16 11:41:57 crc kubenswrapper[4949]: I0216 11:41:57.126683 4949 scope.go:117] "RemoveContainer" containerID="b7be82351d99f931b79e512b13c2ed448abfad0e9e2efeb887e753115cd22e5e" Feb 16 11:41:57 crc kubenswrapper[4949]: I0216 11:41:57.161135 4949 scope.go:117] "RemoveContainer" containerID="b0a479d1b097c38dd3d6e4a671dd77bb73bff6f13bf66a1d12aacfdaded92d57" Feb 16 11:41:57 crc kubenswrapper[4949]: I0216 11:41:57.248702 4949 scope.go:117] "RemoveContainer" containerID="410a1728f81cf8302e7f4956877b44ecb0e45ed5d28f8328a94dea7b626acbb6" Feb 16 11:42:03 crc kubenswrapper[4949]: E0216 11:42:03.267416 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:42:04 crc kubenswrapper[4949]: I0216 11:42:04.550441 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 16 11:42:04 crc kubenswrapper[4949]: I0216 11:42:04.551156 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 16 11:42:05 crc kubenswrapper[4949]: I0216 11:42:05.856164 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-5bpcw" Feb 16 11:42:05 crc kubenswrapper[4949]: I0216 11:42:05.918933 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-5bpcw" Feb 16 11:42:06 crc kubenswrapper[4949]: I0216 11:42:06.105111 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5bpcw"] Feb 16 11:42:06 crc kubenswrapper[4949]: E0216 11:42:06.237798 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:42:07 crc kubenswrapper[4949]: I0216 11:42:07.134154 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-5bpcw" podUID="b315acd2-3c0e-4629-822b-6681280acd91" containerName="registry-server" containerID="cri-o://4cacdab969c94b773eca01be04d4098d315d05f6322d07703fbc62e1a67dbc85" gracePeriod=2 Feb 16 11:42:07 crc kubenswrapper[4949]: I0216 11:42:07.729694 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5bpcw" Feb 16 11:42:07 crc kubenswrapper[4949]: I0216 11:42:07.851866 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b315acd2-3c0e-4629-822b-6681280acd91-catalog-content\") pod \"b315acd2-3c0e-4629-822b-6681280acd91\" (UID: \"b315acd2-3c0e-4629-822b-6681280acd91\") " Feb 16 11:42:07 crc kubenswrapper[4949]: I0216 11:42:07.851971 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b315acd2-3c0e-4629-822b-6681280acd91-utilities\") pod \"b315acd2-3c0e-4629-822b-6681280acd91\" (UID: \"b315acd2-3c0e-4629-822b-6681280acd91\") " Feb 16 11:42:07 crc kubenswrapper[4949]: I0216 11:42:07.852096 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cpwx8\" (UniqueName: \"kubernetes.io/projected/b315acd2-3c0e-4629-822b-6681280acd91-kube-api-access-cpwx8\") pod \"b315acd2-3c0e-4629-822b-6681280acd91\" (UID: \"b315acd2-3c0e-4629-822b-6681280acd91\") " Feb 16 11:42:07 crc kubenswrapper[4949]: I0216 11:42:07.853387 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b315acd2-3c0e-4629-822b-6681280acd91-utilities" (OuterVolumeSpecName: "utilities") pod "b315acd2-3c0e-4629-822b-6681280acd91" (UID: "b315acd2-3c0e-4629-822b-6681280acd91"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:42:07 crc kubenswrapper[4949]: I0216 11:42:07.863539 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b315acd2-3c0e-4629-822b-6681280acd91-kube-api-access-cpwx8" (OuterVolumeSpecName: "kube-api-access-cpwx8") pod "b315acd2-3c0e-4629-822b-6681280acd91" (UID: "b315acd2-3c0e-4629-822b-6681280acd91"). InnerVolumeSpecName "kube-api-access-cpwx8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:42:07 crc kubenswrapper[4949]: I0216 11:42:07.955751 4949 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b315acd2-3c0e-4629-822b-6681280acd91-utilities\") on node \"crc\" DevicePath \"\"" Feb 16 11:42:07 crc kubenswrapper[4949]: I0216 11:42:07.955785 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cpwx8\" (UniqueName: \"kubernetes.io/projected/b315acd2-3c0e-4629-822b-6681280acd91-kube-api-access-cpwx8\") on node \"crc\" DevicePath \"\"" Feb 16 11:42:08 crc kubenswrapper[4949]: I0216 11:42:08.004725 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b315acd2-3c0e-4629-822b-6681280acd91-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b315acd2-3c0e-4629-822b-6681280acd91" (UID: "b315acd2-3c0e-4629-822b-6681280acd91"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:42:08 crc kubenswrapper[4949]: I0216 11:42:08.058077 4949 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b315acd2-3c0e-4629-822b-6681280acd91-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 16 11:42:08 crc kubenswrapper[4949]: I0216 11:42:08.150106 4949 generic.go:334] "Generic (PLEG): container finished" podID="b315acd2-3c0e-4629-822b-6681280acd91" containerID="4cacdab969c94b773eca01be04d4098d315d05f6322d07703fbc62e1a67dbc85" exitCode=0 Feb 16 11:42:08 crc kubenswrapper[4949]: I0216 11:42:08.150157 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5bpcw" event={"ID":"b315acd2-3c0e-4629-822b-6681280acd91","Type":"ContainerDied","Data":"4cacdab969c94b773eca01be04d4098d315d05f6322d07703fbc62e1a67dbc85"} Feb 16 11:42:08 crc kubenswrapper[4949]: I0216 11:42:08.150212 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5bpcw" event={"ID":"b315acd2-3c0e-4629-822b-6681280acd91","Type":"ContainerDied","Data":"6a87f2442511e5d48e28a8e9eb6d40bd252c80c36cf717f16cd0a57c62b3cf41"} Feb 16 11:42:08 crc kubenswrapper[4949]: I0216 11:42:08.150240 4949 scope.go:117] "RemoveContainer" containerID="4cacdab969c94b773eca01be04d4098d315d05f6322d07703fbc62e1a67dbc85" Feb 16 11:42:08 crc kubenswrapper[4949]: I0216 11:42:08.150408 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-5bpcw" Feb 16 11:42:08 crc kubenswrapper[4949]: I0216 11:42:08.185520 4949 scope.go:117] "RemoveContainer" containerID="73be1bbd688c2939e8e62da29a6a5881c63106aa7abc7f118d949e30f32a7c11" Feb 16 11:42:08 crc kubenswrapper[4949]: I0216 11:42:08.210680 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5bpcw"] Feb 16 11:42:08 crc kubenswrapper[4949]: I0216 11:42:08.216843 4949 scope.go:117] "RemoveContainer" containerID="068cb3bd4bbaebaa0c57cffc35801c604cc9ee8e7b72749655d34eee6a7d6693" Feb 16 11:42:08 crc kubenswrapper[4949]: I0216 11:42:08.227310 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-5bpcw"] Feb 16 11:42:08 crc kubenswrapper[4949]: I0216 11:42:08.287937 4949 scope.go:117] "RemoveContainer" containerID="4cacdab969c94b773eca01be04d4098d315d05f6322d07703fbc62e1a67dbc85" Feb 16 11:42:08 crc kubenswrapper[4949]: E0216 11:42:08.288864 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4cacdab969c94b773eca01be04d4098d315d05f6322d07703fbc62e1a67dbc85\": container with ID starting with 4cacdab969c94b773eca01be04d4098d315d05f6322d07703fbc62e1a67dbc85 not found: ID does not exist" containerID="4cacdab969c94b773eca01be04d4098d315d05f6322d07703fbc62e1a67dbc85" Feb 16 11:42:08 crc kubenswrapper[4949]: I0216 11:42:08.288914 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4cacdab969c94b773eca01be04d4098d315d05f6322d07703fbc62e1a67dbc85"} err="failed to get container status \"4cacdab969c94b773eca01be04d4098d315d05f6322d07703fbc62e1a67dbc85\": rpc error: code = NotFound desc = could not find container \"4cacdab969c94b773eca01be04d4098d315d05f6322d07703fbc62e1a67dbc85\": container with ID starting with 4cacdab969c94b773eca01be04d4098d315d05f6322d07703fbc62e1a67dbc85 not found: ID does not exist" Feb 16 11:42:08 crc kubenswrapper[4949]: I0216 11:42:08.288944 4949 scope.go:117] "RemoveContainer" containerID="73be1bbd688c2939e8e62da29a6a5881c63106aa7abc7f118d949e30f32a7c11" Feb 16 11:42:08 crc kubenswrapper[4949]: E0216 11:42:08.289535 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"73be1bbd688c2939e8e62da29a6a5881c63106aa7abc7f118d949e30f32a7c11\": container with ID starting with 73be1bbd688c2939e8e62da29a6a5881c63106aa7abc7f118d949e30f32a7c11 not found: ID does not exist" containerID="73be1bbd688c2939e8e62da29a6a5881c63106aa7abc7f118d949e30f32a7c11" Feb 16 11:42:08 crc kubenswrapper[4949]: I0216 11:42:08.289637 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"73be1bbd688c2939e8e62da29a6a5881c63106aa7abc7f118d949e30f32a7c11"} err="failed to get container status \"73be1bbd688c2939e8e62da29a6a5881c63106aa7abc7f118d949e30f32a7c11\": rpc error: code = NotFound desc = could not find container \"73be1bbd688c2939e8e62da29a6a5881c63106aa7abc7f118d949e30f32a7c11\": container with ID starting with 73be1bbd688c2939e8e62da29a6a5881c63106aa7abc7f118d949e30f32a7c11 not found: ID does not exist" Feb 16 11:42:08 crc kubenswrapper[4949]: I0216 11:42:08.289710 4949 scope.go:117] "RemoveContainer" containerID="068cb3bd4bbaebaa0c57cffc35801c604cc9ee8e7b72749655d34eee6a7d6693" Feb 16 11:42:08 crc kubenswrapper[4949]: E0216 11:42:08.290164 4949 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"068cb3bd4bbaebaa0c57cffc35801c604cc9ee8e7b72749655d34eee6a7d6693\": container with ID starting with 068cb3bd4bbaebaa0c57cffc35801c604cc9ee8e7b72749655d34eee6a7d6693 not found: ID does not exist" containerID="068cb3bd4bbaebaa0c57cffc35801c604cc9ee8e7b72749655d34eee6a7d6693" Feb 16 11:42:08 crc kubenswrapper[4949]: I0216 11:42:08.290209 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"068cb3bd4bbaebaa0c57cffc35801c604cc9ee8e7b72749655d34eee6a7d6693"} err="failed to get container status \"068cb3bd4bbaebaa0c57cffc35801c604cc9ee8e7b72749655d34eee6a7d6693\": rpc error: code = NotFound desc = could not find container \"068cb3bd4bbaebaa0c57cffc35801c604cc9ee8e7b72749655d34eee6a7d6693\": container with ID starting with 068cb3bd4bbaebaa0c57cffc35801c604cc9ee8e7b72749655d34eee6a7d6693 not found: ID does not exist" Feb 16 11:42:09 crc kubenswrapper[4949]: I0216 11:42:09.251128 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b315acd2-3c0e-4629-822b-6681280acd91" path="/var/lib/kubelet/pods/b315acd2-3c0e-4629-822b-6681280acd91/volumes" Feb 16 11:42:14 crc kubenswrapper[4949]: I0216 11:42:14.060658 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-9jxjt"] Feb 16 11:42:14 crc kubenswrapper[4949]: I0216 11:42:14.075723 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-9jxjt"] Feb 16 11:42:14 crc kubenswrapper[4949]: E0216 11:42:14.238500 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:42:15 crc kubenswrapper[4949]: I0216 11:42:15.253195 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="064b7f6b-378a-4e2a-a3e9-1dd28e6b2a88" path="/var/lib/kubelet/pods/064b7f6b-378a-4e2a-a3e9-1dd28e6b2a88/volumes" Feb 16 11:42:16 crc kubenswrapper[4949]: I0216 11:42:16.035442 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-7czt8"] Feb 16 11:42:16 crc kubenswrapper[4949]: I0216 11:42:16.055062 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-7czt8"] Feb 16 11:42:17 crc kubenswrapper[4949]: I0216 11:42:17.255087 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c894b203-cec9-49e4-9b92-bdf185ad11fa" path="/var/lib/kubelet/pods/c894b203-cec9-49e4-9b92-bdf185ad11fa/volumes" Feb 16 11:42:18 crc kubenswrapper[4949]: I0216 11:42:18.060974 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-575a-account-create-update-v2lxv"] Feb 16 11:42:18 crc kubenswrapper[4949]: I0216 11:42:18.080377 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-a249-account-create-update-9lm5t"] Feb 16 11:42:18 crc kubenswrapper[4949]: I0216 11:42:18.108133 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-8lkkq"] Feb 16 11:42:18 crc kubenswrapper[4949]: I0216 11:42:18.125450 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-575a-account-create-update-v2lxv"] Feb 16 11:42:18 crc kubenswrapper[4949]: I0216 11:42:18.145127 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/nova-cell1-581e-account-create-update-hnv29"] Feb 16 11:42:18 crc kubenswrapper[4949]: I0216 11:42:18.157830 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-8lkkq"] Feb 16 11:42:18 crc kubenswrapper[4949]: I0216 11:42:18.169419 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-a249-account-create-update-9lm5t"] Feb 16 11:42:18 crc kubenswrapper[4949]: I0216 11:42:18.182017 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-581e-account-create-update-hnv29"] Feb 16 11:42:18 crc kubenswrapper[4949]: I0216 11:42:18.469183 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-zr6gq"] Feb 16 11:42:18 crc kubenswrapper[4949]: E0216 11:42:18.470197 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b315acd2-3c0e-4629-822b-6681280acd91" containerName="extract-utilities" Feb 16 11:42:18 crc kubenswrapper[4949]: I0216 11:42:18.470211 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="b315acd2-3c0e-4629-822b-6681280acd91" containerName="extract-utilities" Feb 16 11:42:18 crc kubenswrapper[4949]: E0216 11:42:18.470226 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b315acd2-3c0e-4629-822b-6681280acd91" containerName="registry-server" Feb 16 11:42:18 crc kubenswrapper[4949]: I0216 11:42:18.470233 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="b315acd2-3c0e-4629-822b-6681280acd91" containerName="registry-server" Feb 16 11:42:18 crc kubenswrapper[4949]: E0216 11:42:18.470271 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b315acd2-3c0e-4629-822b-6681280acd91" containerName="extract-content" Feb 16 11:42:18 crc kubenswrapper[4949]: I0216 11:42:18.470278 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="b315acd2-3c0e-4629-822b-6681280acd91" containerName="extract-content" Feb 16 11:42:18 crc kubenswrapper[4949]: I0216 11:42:18.470495 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="b315acd2-3c0e-4629-822b-6681280acd91" containerName="registry-server" Feb 16 11:42:18 crc kubenswrapper[4949]: I0216 11:42:18.472257 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-zr6gq" Feb 16 11:42:18 crc kubenswrapper[4949]: I0216 11:42:18.483345 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zr6gq"] Feb 16 11:42:18 crc kubenswrapper[4949]: I0216 11:42:18.606877 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9dfp9\" (UniqueName: \"kubernetes.io/projected/b03f2e0f-7f53-4fb3-ba61-03bfdaa48739-kube-api-access-9dfp9\") pod \"certified-operators-zr6gq\" (UID: \"b03f2e0f-7f53-4fb3-ba61-03bfdaa48739\") " pod="openshift-marketplace/certified-operators-zr6gq" Feb 16 11:42:18 crc kubenswrapper[4949]: I0216 11:42:18.607034 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b03f2e0f-7f53-4fb3-ba61-03bfdaa48739-utilities\") pod \"certified-operators-zr6gq\" (UID: \"b03f2e0f-7f53-4fb3-ba61-03bfdaa48739\") " pod="openshift-marketplace/certified-operators-zr6gq" Feb 16 11:42:18 crc kubenswrapper[4949]: I0216 11:42:18.607147 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b03f2e0f-7f53-4fb3-ba61-03bfdaa48739-catalog-content\") pod \"certified-operators-zr6gq\" (UID: \"b03f2e0f-7f53-4fb3-ba61-03bfdaa48739\") " pod="openshift-marketplace/certified-operators-zr6gq" Feb 16 11:42:18 crc kubenswrapper[4949]: I0216 11:42:18.709889 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9dfp9\" (UniqueName: \"kubernetes.io/projected/b03f2e0f-7f53-4fb3-ba61-03bfdaa48739-kube-api-access-9dfp9\") pod \"certified-operators-zr6gq\" (UID: \"b03f2e0f-7f53-4fb3-ba61-03bfdaa48739\") " pod="openshift-marketplace/certified-operators-zr6gq" Feb 16 11:42:18 crc kubenswrapper[4949]: I0216 11:42:18.710029 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b03f2e0f-7f53-4fb3-ba61-03bfdaa48739-utilities\") pod \"certified-operators-zr6gq\" (UID: \"b03f2e0f-7f53-4fb3-ba61-03bfdaa48739\") " pod="openshift-marketplace/certified-operators-zr6gq" Feb 16 11:42:18 crc kubenswrapper[4949]: I0216 11:42:18.710111 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b03f2e0f-7f53-4fb3-ba61-03bfdaa48739-catalog-content\") pod \"certified-operators-zr6gq\" (UID: \"b03f2e0f-7f53-4fb3-ba61-03bfdaa48739\") " pod="openshift-marketplace/certified-operators-zr6gq" Feb 16 11:42:18 crc kubenswrapper[4949]: I0216 11:42:18.710615 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b03f2e0f-7f53-4fb3-ba61-03bfdaa48739-catalog-content\") pod \"certified-operators-zr6gq\" (UID: \"b03f2e0f-7f53-4fb3-ba61-03bfdaa48739\") " pod="openshift-marketplace/certified-operators-zr6gq" Feb 16 11:42:18 crc kubenswrapper[4949]: I0216 11:42:18.710730 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b03f2e0f-7f53-4fb3-ba61-03bfdaa48739-utilities\") pod \"certified-operators-zr6gq\" (UID: \"b03f2e0f-7f53-4fb3-ba61-03bfdaa48739\") " pod="openshift-marketplace/certified-operators-zr6gq" Feb 16 11:42:18 crc kubenswrapper[4949]: I0216 11:42:18.729139 4949 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-9dfp9\" (UniqueName: \"kubernetes.io/projected/b03f2e0f-7f53-4fb3-ba61-03bfdaa48739-kube-api-access-9dfp9\") pod \"certified-operators-zr6gq\" (UID: \"b03f2e0f-7f53-4fb3-ba61-03bfdaa48739\") " pod="openshift-marketplace/certified-operators-zr6gq" Feb 16 11:42:18 crc kubenswrapper[4949]: I0216 11:42:18.796680 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zr6gq" Feb 16 11:42:19 crc kubenswrapper[4949]: I0216 11:42:19.262022 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2394da0b-9528-4d78-9fca-bf224b27ad5b" path="/var/lib/kubelet/pods/2394da0b-9528-4d78-9fca-bf224b27ad5b/volumes" Feb 16 11:42:19 crc kubenswrapper[4949]: I0216 11:42:19.264040 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="80642c6c-aa06-4c35-ab96-3c2114889ee5" path="/var/lib/kubelet/pods/80642c6c-aa06-4c35-ab96-3c2114889ee5/volumes" Feb 16 11:42:19 crc kubenswrapper[4949]: I0216 11:42:19.269313 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d0913052-0fd5-45bf-a268-47dc86f7af86" path="/var/lib/kubelet/pods/d0913052-0fd5-45bf-a268-47dc86f7af86/volumes" Feb 16 11:42:19 crc kubenswrapper[4949]: I0216 11:42:19.271059 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ead190f2-2e52-47d7-a1d1-9d4f19c83e5d" path="/var/lib/kubelet/pods/ead190f2-2e52-47d7-a1d1-9d4f19c83e5d/volumes" Feb 16 11:42:19 crc kubenswrapper[4949]: I0216 11:42:19.282585 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-58c48"] Feb 16 11:42:19 crc kubenswrapper[4949]: I0216 11:42:19.285733 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-58c48" Feb 16 11:42:19 crc kubenswrapper[4949]: I0216 11:42:19.320344 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-58c48"] Feb 16 11:42:19 crc kubenswrapper[4949]: I0216 11:42:19.399907 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zr6gq"] Feb 16 11:42:19 crc kubenswrapper[4949]: I0216 11:42:19.452792 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3dadb1d-f257-4c56-b5ab-66e7983b4010-utilities\") pod \"community-operators-58c48\" (UID: \"d3dadb1d-f257-4c56-b5ab-66e7983b4010\") " pod="openshift-marketplace/community-operators-58c48" Feb 16 11:42:19 crc kubenswrapper[4949]: I0216 11:42:19.453026 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3dadb1d-f257-4c56-b5ab-66e7983b4010-catalog-content\") pod \"community-operators-58c48\" (UID: \"d3dadb1d-f257-4c56-b5ab-66e7983b4010\") " pod="openshift-marketplace/community-operators-58c48" Feb 16 11:42:19 crc kubenswrapper[4949]: I0216 11:42:19.453091 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pklgw\" (UniqueName: \"kubernetes.io/projected/d3dadb1d-f257-4c56-b5ab-66e7983b4010-kube-api-access-pklgw\") pod \"community-operators-58c48\" (UID: \"d3dadb1d-f257-4c56-b5ab-66e7983b4010\") " pod="openshift-marketplace/community-operators-58c48" Feb 16 11:42:19 crc kubenswrapper[4949]: I0216 11:42:19.555922 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3dadb1d-f257-4c56-b5ab-66e7983b4010-catalog-content\") pod \"community-operators-58c48\" (UID: \"d3dadb1d-f257-4c56-b5ab-66e7983b4010\") " pod="openshift-marketplace/community-operators-58c48" Feb 16 11:42:19 crc kubenswrapper[4949]: I0216 11:42:19.556573 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3dadb1d-f257-4c56-b5ab-66e7983b4010-catalog-content\") pod \"community-operators-58c48\" (UID: \"d3dadb1d-f257-4c56-b5ab-66e7983b4010\") " pod="openshift-marketplace/community-operators-58c48" Feb 16 11:42:19 crc kubenswrapper[4949]: I0216 11:42:19.557891 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pklgw\" (UniqueName: \"kubernetes.io/projected/d3dadb1d-f257-4c56-b5ab-66e7983b4010-kube-api-access-pklgw\") pod \"community-operators-58c48\" (UID: \"d3dadb1d-f257-4c56-b5ab-66e7983b4010\") " pod="openshift-marketplace/community-operators-58c48" Feb 16 11:42:19 crc kubenswrapper[4949]: I0216 11:42:19.558109 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3dadb1d-f257-4c56-b5ab-66e7983b4010-utilities\") pod \"community-operators-58c48\" (UID: \"d3dadb1d-f257-4c56-b5ab-66e7983b4010\") " pod="openshift-marketplace/community-operators-58c48" Feb 16 11:42:19 crc kubenswrapper[4949]: I0216 11:42:19.558660 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3dadb1d-f257-4c56-b5ab-66e7983b4010-utilities\") pod \"community-operators-58c48\" (UID: 
\"d3dadb1d-f257-4c56-b5ab-66e7983b4010\") " pod="openshift-marketplace/community-operators-58c48" Feb 16 11:42:19 crc kubenswrapper[4949]: I0216 11:42:19.581912 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pklgw\" (UniqueName: \"kubernetes.io/projected/d3dadb1d-f257-4c56-b5ab-66e7983b4010-kube-api-access-pklgw\") pod \"community-operators-58c48\" (UID: \"d3dadb1d-f257-4c56-b5ab-66e7983b4010\") " pod="openshift-marketplace/community-operators-58c48" Feb 16 11:42:19 crc kubenswrapper[4949]: I0216 11:42:19.614891 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-58c48" Feb 16 11:42:20 crc kubenswrapper[4949]: I0216 11:42:20.184754 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-58c48"] Feb 16 11:42:20 crc kubenswrapper[4949]: I0216 11:42:20.318272 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-58c48" event={"ID":"d3dadb1d-f257-4c56-b5ab-66e7983b4010","Type":"ContainerStarted","Data":"7b821434e4d99af3ed0de4624f2e4e53ff83a83fc35a8ea0042e93404f97e868"} Feb 16 11:42:20 crc kubenswrapper[4949]: I0216 11:42:20.322147 4949 generic.go:334] "Generic (PLEG): container finished" podID="b03f2e0f-7f53-4fb3-ba61-03bfdaa48739" containerID="fc7a05f1dc35b475a1a616d10d4ae738c94838237e88e8faa138506cb6df40aa" exitCode=0 Feb 16 11:42:20 crc kubenswrapper[4949]: I0216 11:42:20.322213 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zr6gq" event={"ID":"b03f2e0f-7f53-4fb3-ba61-03bfdaa48739","Type":"ContainerDied","Data":"fc7a05f1dc35b475a1a616d10d4ae738c94838237e88e8faa138506cb6df40aa"} Feb 16 11:42:20 crc kubenswrapper[4949]: I0216 11:42:20.322245 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zr6gq" event={"ID":"b03f2e0f-7f53-4fb3-ba61-03bfdaa48739","Type":"ContainerStarted","Data":"ff9b39809e25a0ef1ddef3389eb9782a265da1abf53c610399b913bd88369a19"} Feb 16 11:42:21 crc kubenswrapper[4949]: E0216 11:42:21.250698 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:42:21 crc kubenswrapper[4949]: I0216 11:42:21.333907 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zr6gq" event={"ID":"b03f2e0f-7f53-4fb3-ba61-03bfdaa48739","Type":"ContainerStarted","Data":"48c5369bf3a06d13d9bc7e0cd183f2191aeab939e35cc125d2d541315f488878"} Feb 16 11:42:21 crc kubenswrapper[4949]: I0216 11:42:21.335808 4949 generic.go:334] "Generic (PLEG): container finished" podID="d3dadb1d-f257-4c56-b5ab-66e7983b4010" containerID="107bb7aafd0f28e1ab1bfb6b1574aef0a075691933ea57a88488ad28a6ad2278" exitCode=0 Feb 16 11:42:21 crc kubenswrapper[4949]: I0216 11:42:21.335901 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-58c48" event={"ID":"d3dadb1d-f257-4c56-b5ab-66e7983b4010","Type":"ContainerDied","Data":"107bb7aafd0f28e1ab1bfb6b1574aef0a075691933ea57a88488ad28a6ad2278"} Feb 16 11:42:22 crc kubenswrapper[4949]: I0216 11:42:22.348423 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/community-operators-58c48" event={"ID":"d3dadb1d-f257-4c56-b5ab-66e7983b4010","Type":"ContainerStarted","Data":"cbdeb25057a19470bf93494e089bba90c39b7eb9185a6b0ffc610dc7fe4548c1"} Feb 16 11:42:23 crc kubenswrapper[4949]: I0216 11:42:23.360602 4949 generic.go:334] "Generic (PLEG): container finished" podID="b03f2e0f-7f53-4fb3-ba61-03bfdaa48739" containerID="48c5369bf3a06d13d9bc7e0cd183f2191aeab939e35cc125d2d541315f488878" exitCode=0 Feb 16 11:42:23 crc kubenswrapper[4949]: I0216 11:42:23.360709 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zr6gq" event={"ID":"b03f2e0f-7f53-4fb3-ba61-03bfdaa48739","Type":"ContainerDied","Data":"48c5369bf3a06d13d9bc7e0cd183f2191aeab939e35cc125d2d541315f488878"} Feb 16 11:42:24 crc kubenswrapper[4949]: I0216 11:42:24.378993 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zr6gq" event={"ID":"b03f2e0f-7f53-4fb3-ba61-03bfdaa48739","Type":"ContainerStarted","Data":"c60b6c56f5bb18a192507ee00ca8c463b232329c4290fa1d576a6669cc8f7bc7"} Feb 16 11:42:24 crc kubenswrapper[4949]: I0216 11:42:24.408819 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-zr6gq" podStartSLOduration=2.894982202 podStartE2EDuration="6.408795183s" podCreationTimestamp="2026-02-16 11:42:18 +0000 UTC" firstStartedPulling="2026-02-16 11:42:20.325479301 +0000 UTC m=+2129.954813466" lastFinishedPulling="2026-02-16 11:42:23.839292262 +0000 UTC m=+2133.468626447" observedRunningTime="2026-02-16 11:42:24.399438867 +0000 UTC m=+2134.028773042" watchObservedRunningTime="2026-02-16 11:42:24.408795183 +0000 UTC m=+2134.038129348" Feb 16 11:42:25 crc kubenswrapper[4949]: I0216 11:42:25.392611 4949 generic.go:334] "Generic (PLEG): container finished" podID="d3dadb1d-f257-4c56-b5ab-66e7983b4010" containerID="cbdeb25057a19470bf93494e089bba90c39b7eb9185a6b0ffc610dc7fe4548c1" exitCode=0 Feb 16 11:42:25 crc kubenswrapper[4949]: I0216 11:42:25.392690 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-58c48" event={"ID":"d3dadb1d-f257-4c56-b5ab-66e7983b4010","Type":"ContainerDied","Data":"cbdeb25057a19470bf93494e089bba90c39b7eb9185a6b0ffc610dc7fe4548c1"} Feb 16 11:42:26 crc kubenswrapper[4949]: I0216 11:42:26.408087 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-58c48" event={"ID":"d3dadb1d-f257-4c56-b5ab-66e7983b4010","Type":"ContainerStarted","Data":"11c3dfc2cb98324c6375c379929194a4aa944a1ffd650fcee901ae873c180b27"} Feb 16 11:42:26 crc kubenswrapper[4949]: I0216 11:42:26.433834 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-58c48" podStartSLOduration=2.62434256 podStartE2EDuration="7.433799876s" podCreationTimestamp="2026-02-16 11:42:19 +0000 UTC" firstStartedPulling="2026-02-16 11:42:21.337574105 +0000 UTC m=+2130.966908270" lastFinishedPulling="2026-02-16 11:42:26.147031411 +0000 UTC m=+2135.776365586" observedRunningTime="2026-02-16 11:42:26.42899265 +0000 UTC m=+2136.058326815" watchObservedRunningTime="2026-02-16 11:42:26.433799876 +0000 UTC m=+2136.063134081" Feb 16 11:42:28 crc kubenswrapper[4949]: E0216 11:42:28.238656 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:42:28 crc kubenswrapper[4949]: I0216 11:42:28.797287 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-zr6gq" Feb 16 11:42:28 crc kubenswrapper[4949]: I0216 11:42:28.797616 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-zr6gq" Feb 16 11:42:28 crc kubenswrapper[4949]: I0216 11:42:28.850351 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-zr6gq" Feb 16 11:42:29 crc kubenswrapper[4949]: I0216 11:42:29.507448 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-zr6gq" Feb 16 11:42:29 crc kubenswrapper[4949]: I0216 11:42:29.615866 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-58c48" Feb 16 11:42:29 crc kubenswrapper[4949]: I0216 11:42:29.616397 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-58c48" Feb 16 11:42:30 crc kubenswrapper[4949]: I0216 11:42:30.681646 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-58c48" podUID="d3dadb1d-f257-4c56-b5ab-66e7983b4010" containerName="registry-server" probeResult="failure" output=< Feb 16 11:42:30 crc kubenswrapper[4949]: timeout: failed to connect service ":50051" within 1s Feb 16 11:42:30 crc kubenswrapper[4949]: > Feb 16 11:42:32 crc kubenswrapper[4949]: I0216 11:42:32.465726 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zr6gq"] Feb 16 11:42:32 crc kubenswrapper[4949]: I0216 11:42:32.466321 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-zr6gq" podUID="b03f2e0f-7f53-4fb3-ba61-03bfdaa48739" containerName="registry-server" containerID="cri-o://c60b6c56f5bb18a192507ee00ca8c463b232329c4290fa1d576a6669cc8f7bc7" gracePeriod=2 Feb 16 11:42:33 crc kubenswrapper[4949]: I0216 11:42:33.088588 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-zr6gq" Feb 16 11:42:33 crc kubenswrapper[4949]: E0216 11:42:33.237458 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:42:33 crc kubenswrapper[4949]: I0216 11:42:33.240445 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b03f2e0f-7f53-4fb3-ba61-03bfdaa48739-catalog-content\") pod \"b03f2e0f-7f53-4fb3-ba61-03bfdaa48739\" (UID: \"b03f2e0f-7f53-4fb3-ba61-03bfdaa48739\") " Feb 16 11:42:33 crc kubenswrapper[4949]: I0216 11:42:33.240715 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9dfp9\" (UniqueName: \"kubernetes.io/projected/b03f2e0f-7f53-4fb3-ba61-03bfdaa48739-kube-api-access-9dfp9\") pod \"b03f2e0f-7f53-4fb3-ba61-03bfdaa48739\" (UID: \"b03f2e0f-7f53-4fb3-ba61-03bfdaa48739\") " Feb 16 11:42:33 crc kubenswrapper[4949]: I0216 11:42:33.241036 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b03f2e0f-7f53-4fb3-ba61-03bfdaa48739-utilities\") pod \"b03f2e0f-7f53-4fb3-ba61-03bfdaa48739\" (UID: \"b03f2e0f-7f53-4fb3-ba61-03bfdaa48739\") " Feb 16 11:42:33 crc kubenswrapper[4949]: I0216 11:42:33.241502 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b03f2e0f-7f53-4fb3-ba61-03bfdaa48739-utilities" (OuterVolumeSpecName: "utilities") pod "b03f2e0f-7f53-4fb3-ba61-03bfdaa48739" (UID: "b03f2e0f-7f53-4fb3-ba61-03bfdaa48739"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:42:33 crc kubenswrapper[4949]: I0216 11:42:33.241980 4949 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b03f2e0f-7f53-4fb3-ba61-03bfdaa48739-utilities\") on node \"crc\" DevicePath \"\"" Feb 16 11:42:33 crc kubenswrapper[4949]: I0216 11:42:33.248697 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b03f2e0f-7f53-4fb3-ba61-03bfdaa48739-kube-api-access-9dfp9" (OuterVolumeSpecName: "kube-api-access-9dfp9") pod "b03f2e0f-7f53-4fb3-ba61-03bfdaa48739" (UID: "b03f2e0f-7f53-4fb3-ba61-03bfdaa48739"). InnerVolumeSpecName "kube-api-access-9dfp9". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:42:33 crc kubenswrapper[4949]: I0216 11:42:33.307453 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b03f2e0f-7f53-4fb3-ba61-03bfdaa48739-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b03f2e0f-7f53-4fb3-ba61-03bfdaa48739" (UID: "b03f2e0f-7f53-4fb3-ba61-03bfdaa48739"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:42:33 crc kubenswrapper[4949]: I0216 11:42:33.344120 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9dfp9\" (UniqueName: \"kubernetes.io/projected/b03f2e0f-7f53-4fb3-ba61-03bfdaa48739-kube-api-access-9dfp9\") on node \"crc\" DevicePath \"\"" Feb 16 11:42:33 crc kubenswrapper[4949]: I0216 11:42:33.344158 4949 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b03f2e0f-7f53-4fb3-ba61-03bfdaa48739-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 16 11:42:33 crc kubenswrapper[4949]: I0216 11:42:33.614744 4949 generic.go:334] "Generic (PLEG): container finished" podID="b03f2e0f-7f53-4fb3-ba61-03bfdaa48739" containerID="c60b6c56f5bb18a192507ee00ca8c463b232329c4290fa1d576a6669cc8f7bc7" exitCode=0 Feb 16 11:42:33 crc kubenswrapper[4949]: I0216 11:42:33.614800 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zr6gq" event={"ID":"b03f2e0f-7f53-4fb3-ba61-03bfdaa48739","Type":"ContainerDied","Data":"c60b6c56f5bb18a192507ee00ca8c463b232329c4290fa1d576a6669cc8f7bc7"} Feb 16 11:42:33 crc kubenswrapper[4949]: I0216 11:42:33.614840 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zr6gq" event={"ID":"b03f2e0f-7f53-4fb3-ba61-03bfdaa48739","Type":"ContainerDied","Data":"ff9b39809e25a0ef1ddef3389eb9782a265da1abf53c610399b913bd88369a19"} Feb 16 11:42:33 crc kubenswrapper[4949]: I0216 11:42:33.614863 4949 scope.go:117] "RemoveContainer" containerID="c60b6c56f5bb18a192507ee00ca8c463b232329c4290fa1d576a6669cc8f7bc7" Feb 16 11:42:33 crc kubenswrapper[4949]: I0216 11:42:33.615041 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-zr6gq" Feb 16 11:42:33 crc kubenswrapper[4949]: I0216 11:42:33.672695 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zr6gq"] Feb 16 11:42:33 crc kubenswrapper[4949]: I0216 11:42:33.675474 4949 scope.go:117] "RemoveContainer" containerID="48c5369bf3a06d13d9bc7e0cd183f2191aeab939e35cc125d2d541315f488878" Feb 16 11:42:33 crc kubenswrapper[4949]: I0216 11:42:33.689052 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-zr6gq"] Feb 16 11:42:33 crc kubenswrapper[4949]: I0216 11:42:33.709746 4949 scope.go:117] "RemoveContainer" containerID="fc7a05f1dc35b475a1a616d10d4ae738c94838237e88e8faa138506cb6df40aa" Feb 16 11:42:33 crc kubenswrapper[4949]: I0216 11:42:33.768522 4949 scope.go:117] "RemoveContainer" containerID="c60b6c56f5bb18a192507ee00ca8c463b232329c4290fa1d576a6669cc8f7bc7" Feb 16 11:42:33 crc kubenswrapper[4949]: E0216 11:42:33.768940 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c60b6c56f5bb18a192507ee00ca8c463b232329c4290fa1d576a6669cc8f7bc7\": container with ID starting with c60b6c56f5bb18a192507ee00ca8c463b232329c4290fa1d576a6669cc8f7bc7 not found: ID does not exist" containerID="c60b6c56f5bb18a192507ee00ca8c463b232329c4290fa1d576a6669cc8f7bc7" Feb 16 11:42:33 crc kubenswrapper[4949]: I0216 11:42:33.768974 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c60b6c56f5bb18a192507ee00ca8c463b232329c4290fa1d576a6669cc8f7bc7"} err="failed to get container status \"c60b6c56f5bb18a192507ee00ca8c463b232329c4290fa1d576a6669cc8f7bc7\": rpc error: code = NotFound desc = could not find container \"c60b6c56f5bb18a192507ee00ca8c463b232329c4290fa1d576a6669cc8f7bc7\": container with ID starting with c60b6c56f5bb18a192507ee00ca8c463b232329c4290fa1d576a6669cc8f7bc7 not found: ID does not exist" Feb 16 11:42:33 crc kubenswrapper[4949]: I0216 11:42:33.769020 4949 scope.go:117] "RemoveContainer" containerID="48c5369bf3a06d13d9bc7e0cd183f2191aeab939e35cc125d2d541315f488878" Feb 16 11:42:33 crc kubenswrapper[4949]: E0216 11:42:33.769304 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"48c5369bf3a06d13d9bc7e0cd183f2191aeab939e35cc125d2d541315f488878\": container with ID starting with 48c5369bf3a06d13d9bc7e0cd183f2191aeab939e35cc125d2d541315f488878 not found: ID does not exist" containerID="48c5369bf3a06d13d9bc7e0cd183f2191aeab939e35cc125d2d541315f488878" Feb 16 11:42:33 crc kubenswrapper[4949]: I0216 11:42:33.769328 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"48c5369bf3a06d13d9bc7e0cd183f2191aeab939e35cc125d2d541315f488878"} err="failed to get container status \"48c5369bf3a06d13d9bc7e0cd183f2191aeab939e35cc125d2d541315f488878\": rpc error: code = NotFound desc = could not find container \"48c5369bf3a06d13d9bc7e0cd183f2191aeab939e35cc125d2d541315f488878\": container with ID starting with 48c5369bf3a06d13d9bc7e0cd183f2191aeab939e35cc125d2d541315f488878 not found: ID does not exist" Feb 16 11:42:33 crc kubenswrapper[4949]: I0216 11:42:33.769389 4949 scope.go:117] "RemoveContainer" containerID="fc7a05f1dc35b475a1a616d10d4ae738c94838237e88e8faa138506cb6df40aa" Feb 16 11:42:33 crc kubenswrapper[4949]: E0216 11:42:33.769709 4949 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"fc7a05f1dc35b475a1a616d10d4ae738c94838237e88e8faa138506cb6df40aa\": container with ID starting with fc7a05f1dc35b475a1a616d10d4ae738c94838237e88e8faa138506cb6df40aa not found: ID does not exist" containerID="fc7a05f1dc35b475a1a616d10d4ae738c94838237e88e8faa138506cb6df40aa" Feb 16 11:42:33 crc kubenswrapper[4949]: I0216 11:42:33.769745 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fc7a05f1dc35b475a1a616d10d4ae738c94838237e88e8faa138506cb6df40aa"} err="failed to get container status \"fc7a05f1dc35b475a1a616d10d4ae738c94838237e88e8faa138506cb6df40aa\": rpc error: code = NotFound desc = could not find container \"fc7a05f1dc35b475a1a616d10d4ae738c94838237e88e8faa138506cb6df40aa\": container with ID starting with fc7a05f1dc35b475a1a616d10d4ae738c94838237e88e8faa138506cb6df40aa not found: ID does not exist" Feb 16 11:42:34 crc kubenswrapper[4949]: I0216 11:42:34.550662 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 16 11:42:34 crc kubenswrapper[4949]: I0216 11:42:34.550943 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 16 11:42:34 crc kubenswrapper[4949]: I0216 11:42:34.550990 4949 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-26lss" Feb 16 11:42:34 crc kubenswrapper[4949]: I0216 11:42:34.552203 4949 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"096a296a43c90ecf253548b6f73536cd24c074d6cf2f0b80b69fc989695ff765"} pod="openshift-machine-config-operator/machine-config-daemon-26lss" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 16 11:42:34 crc kubenswrapper[4949]: I0216 11:42:34.552271 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" containerID="cri-o://096a296a43c90ecf253548b6f73536cd24c074d6cf2f0b80b69fc989695ff765" gracePeriod=600 Feb 16 11:42:35 crc kubenswrapper[4949]: I0216 11:42:35.251708 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b03f2e0f-7f53-4fb3-ba61-03bfdaa48739" path="/var/lib/kubelet/pods/b03f2e0f-7f53-4fb3-ba61-03bfdaa48739/volumes" Feb 16 11:42:35 crc kubenswrapper[4949]: I0216 11:42:35.640046 4949 generic.go:334] "Generic (PLEG): container finished" podID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerID="096a296a43c90ecf253548b6f73536cd24c074d6cf2f0b80b69fc989695ff765" exitCode=0 Feb 16 11:42:35 crc kubenswrapper[4949]: I0216 11:42:35.640086 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" 
event={"ID":"39ca5ab7-457c-4404-a3eb-f6acce74843b","Type":"ContainerDied","Data":"096a296a43c90ecf253548b6f73536cd24c074d6cf2f0b80b69fc989695ff765"} Feb 16 11:42:35 crc kubenswrapper[4949]: I0216 11:42:35.640142 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" event={"ID":"39ca5ab7-457c-4404-a3eb-f6acce74843b","Type":"ContainerStarted","Data":"739a990adef2b23deb9d2adfe3c94bdae210ea27ced100a67c5d19e922815eb4"} Feb 16 11:42:35 crc kubenswrapper[4949]: I0216 11:42:35.640161 4949 scope.go:117] "RemoveContainer" containerID="fcc25fdabb7245fb2b21fe157daaa0479c92c3c1e231adb091c93ee7fe6b8437" Feb 16 11:42:39 crc kubenswrapper[4949]: E0216 11:42:39.240482 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:42:39 crc kubenswrapper[4949]: I0216 11:42:39.671167 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-58c48" Feb 16 11:42:39 crc kubenswrapper[4949]: I0216 11:42:39.729506 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-58c48" Feb 16 11:42:40 crc kubenswrapper[4949]: I0216 11:42:40.255544 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-58c48"] Feb 16 11:42:41 crc kubenswrapper[4949]: I0216 11:42:41.709323 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-58c48" podUID="d3dadb1d-f257-4c56-b5ab-66e7983b4010" containerName="registry-server" containerID="cri-o://11c3dfc2cb98324c6375c379929194a4aa944a1ffd650fcee901ae873c180b27" gracePeriod=2 Feb 16 11:42:42 crc kubenswrapper[4949]: I0216 11:42:42.252333 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-58c48" Feb 16 11:42:42 crc kubenswrapper[4949]: I0216 11:42:42.402193 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pklgw\" (UniqueName: \"kubernetes.io/projected/d3dadb1d-f257-4c56-b5ab-66e7983b4010-kube-api-access-pklgw\") pod \"d3dadb1d-f257-4c56-b5ab-66e7983b4010\" (UID: \"d3dadb1d-f257-4c56-b5ab-66e7983b4010\") " Feb 16 11:42:42 crc kubenswrapper[4949]: I0216 11:42:42.402303 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3dadb1d-f257-4c56-b5ab-66e7983b4010-catalog-content\") pod \"d3dadb1d-f257-4c56-b5ab-66e7983b4010\" (UID: \"d3dadb1d-f257-4c56-b5ab-66e7983b4010\") " Feb 16 11:42:42 crc kubenswrapper[4949]: I0216 11:42:42.402396 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3dadb1d-f257-4c56-b5ab-66e7983b4010-utilities\") pod \"d3dadb1d-f257-4c56-b5ab-66e7983b4010\" (UID: \"d3dadb1d-f257-4c56-b5ab-66e7983b4010\") " Feb 16 11:42:42 crc kubenswrapper[4949]: I0216 11:42:42.403615 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d3dadb1d-f257-4c56-b5ab-66e7983b4010-utilities" (OuterVolumeSpecName: "utilities") pod "d3dadb1d-f257-4c56-b5ab-66e7983b4010" (UID: "d3dadb1d-f257-4c56-b5ab-66e7983b4010"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:42:42 crc kubenswrapper[4949]: I0216 11:42:42.410000 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d3dadb1d-f257-4c56-b5ab-66e7983b4010-kube-api-access-pklgw" (OuterVolumeSpecName: "kube-api-access-pklgw") pod "d3dadb1d-f257-4c56-b5ab-66e7983b4010" (UID: "d3dadb1d-f257-4c56-b5ab-66e7983b4010"). InnerVolumeSpecName "kube-api-access-pklgw". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:42:42 crc kubenswrapper[4949]: I0216 11:42:42.472241 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d3dadb1d-f257-4c56-b5ab-66e7983b4010-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d3dadb1d-f257-4c56-b5ab-66e7983b4010" (UID: "d3dadb1d-f257-4c56-b5ab-66e7983b4010"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:42:42 crc kubenswrapper[4949]: I0216 11:42:42.506472 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pklgw\" (UniqueName: \"kubernetes.io/projected/d3dadb1d-f257-4c56-b5ab-66e7983b4010-kube-api-access-pklgw\") on node \"crc\" DevicePath \"\"" Feb 16 11:42:42 crc kubenswrapper[4949]: I0216 11:42:42.506781 4949 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3dadb1d-f257-4c56-b5ab-66e7983b4010-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 16 11:42:42 crc kubenswrapper[4949]: I0216 11:42:42.506796 4949 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3dadb1d-f257-4c56-b5ab-66e7983b4010-utilities\") on node \"crc\" DevicePath \"\"" Feb 16 11:42:42 crc kubenswrapper[4949]: I0216 11:42:42.722895 4949 generic.go:334] "Generic (PLEG): container finished" podID="d3dadb1d-f257-4c56-b5ab-66e7983b4010" containerID="11c3dfc2cb98324c6375c379929194a4aa944a1ffd650fcee901ae873c180b27" exitCode=0 Feb 16 11:42:42 crc kubenswrapper[4949]: I0216 11:42:42.722941 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-58c48" event={"ID":"d3dadb1d-f257-4c56-b5ab-66e7983b4010","Type":"ContainerDied","Data":"11c3dfc2cb98324c6375c379929194a4aa944a1ffd650fcee901ae873c180b27"} Feb 16 11:42:42 crc kubenswrapper[4949]: I0216 11:42:42.722969 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-58c48" event={"ID":"d3dadb1d-f257-4c56-b5ab-66e7983b4010","Type":"ContainerDied","Data":"7b821434e4d99af3ed0de4624f2e4e53ff83a83fc35a8ea0042e93404f97e868"} Feb 16 11:42:42 crc kubenswrapper[4949]: I0216 11:42:42.722987 4949 scope.go:117] "RemoveContainer" containerID="11c3dfc2cb98324c6375c379929194a4aa944a1ffd650fcee901ae873c180b27" Feb 16 11:42:42 crc kubenswrapper[4949]: I0216 11:42:42.723034 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-58c48" Feb 16 11:42:42 crc kubenswrapper[4949]: I0216 11:42:42.767007 4949 scope.go:117] "RemoveContainer" containerID="cbdeb25057a19470bf93494e089bba90c39b7eb9185a6b0ffc610dc7fe4548c1" Feb 16 11:42:42 crc kubenswrapper[4949]: I0216 11:42:42.771246 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-58c48"] Feb 16 11:42:42 crc kubenswrapper[4949]: I0216 11:42:42.783829 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-58c48"] Feb 16 11:42:42 crc kubenswrapper[4949]: I0216 11:42:42.792931 4949 scope.go:117] "RemoveContainer" containerID="107bb7aafd0f28e1ab1bfb6b1574aef0a075691933ea57a88488ad28a6ad2278" Feb 16 11:42:42 crc kubenswrapper[4949]: I0216 11:42:42.871524 4949 scope.go:117] "RemoveContainer" containerID="11c3dfc2cb98324c6375c379929194a4aa944a1ffd650fcee901ae873c180b27" Feb 16 11:42:42 crc kubenswrapper[4949]: E0216 11:42:42.872747 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"11c3dfc2cb98324c6375c379929194a4aa944a1ffd650fcee901ae873c180b27\": container with ID starting with 11c3dfc2cb98324c6375c379929194a4aa944a1ffd650fcee901ae873c180b27 not found: ID does not exist" containerID="11c3dfc2cb98324c6375c379929194a4aa944a1ffd650fcee901ae873c180b27" Feb 16 11:42:42 crc kubenswrapper[4949]: I0216 11:42:42.872788 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"11c3dfc2cb98324c6375c379929194a4aa944a1ffd650fcee901ae873c180b27"} err="failed to get container status \"11c3dfc2cb98324c6375c379929194a4aa944a1ffd650fcee901ae873c180b27\": rpc error: code = NotFound desc = could not find container \"11c3dfc2cb98324c6375c379929194a4aa944a1ffd650fcee901ae873c180b27\": container with ID starting with 11c3dfc2cb98324c6375c379929194a4aa944a1ffd650fcee901ae873c180b27 not found: ID does not exist" Feb 16 11:42:42 crc kubenswrapper[4949]: I0216 11:42:42.872818 4949 scope.go:117] "RemoveContainer" containerID="cbdeb25057a19470bf93494e089bba90c39b7eb9185a6b0ffc610dc7fe4548c1" Feb 16 11:42:42 crc kubenswrapper[4949]: E0216 11:42:42.873478 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cbdeb25057a19470bf93494e089bba90c39b7eb9185a6b0ffc610dc7fe4548c1\": container with ID starting with cbdeb25057a19470bf93494e089bba90c39b7eb9185a6b0ffc610dc7fe4548c1 not found: ID does not exist" containerID="cbdeb25057a19470bf93494e089bba90c39b7eb9185a6b0ffc610dc7fe4548c1" Feb 16 11:42:42 crc kubenswrapper[4949]: I0216 11:42:42.873502 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cbdeb25057a19470bf93494e089bba90c39b7eb9185a6b0ffc610dc7fe4548c1"} err="failed to get container status \"cbdeb25057a19470bf93494e089bba90c39b7eb9185a6b0ffc610dc7fe4548c1\": rpc error: code = NotFound desc = could not find container \"cbdeb25057a19470bf93494e089bba90c39b7eb9185a6b0ffc610dc7fe4548c1\": container with ID starting with cbdeb25057a19470bf93494e089bba90c39b7eb9185a6b0ffc610dc7fe4548c1 not found: ID does not exist" Feb 16 11:42:42 crc kubenswrapper[4949]: I0216 11:42:42.873519 4949 scope.go:117] "RemoveContainer" containerID="107bb7aafd0f28e1ab1bfb6b1574aef0a075691933ea57a88488ad28a6ad2278" Feb 16 11:42:42 crc kubenswrapper[4949]: E0216 11:42:42.873740 4949 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"107bb7aafd0f28e1ab1bfb6b1574aef0a075691933ea57a88488ad28a6ad2278\": container with ID starting with 107bb7aafd0f28e1ab1bfb6b1574aef0a075691933ea57a88488ad28a6ad2278 not found: ID does not exist" containerID="107bb7aafd0f28e1ab1bfb6b1574aef0a075691933ea57a88488ad28a6ad2278" Feb 16 11:42:42 crc kubenswrapper[4949]: I0216 11:42:42.873761 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"107bb7aafd0f28e1ab1bfb6b1574aef0a075691933ea57a88488ad28a6ad2278"} err="failed to get container status \"107bb7aafd0f28e1ab1bfb6b1574aef0a075691933ea57a88488ad28a6ad2278\": rpc error: code = NotFound desc = could not find container \"107bb7aafd0f28e1ab1bfb6b1574aef0a075691933ea57a88488ad28a6ad2278\": container with ID starting with 107bb7aafd0f28e1ab1bfb6b1574aef0a075691933ea57a88488ad28a6ad2278 not found: ID does not exist" Feb 16 11:42:43 crc kubenswrapper[4949]: I0216 11:42:43.249442 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d3dadb1d-f257-4c56-b5ab-66e7983b4010" path="/var/lib/kubelet/pods/d3dadb1d-f257-4c56-b5ab-66e7983b4010/volumes" Feb 16 11:42:47 crc kubenswrapper[4949]: E0216 11:42:47.239362 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:42:52 crc kubenswrapper[4949]: E0216 11:42:52.237998 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:42:56 crc kubenswrapper[4949]: I0216 11:42:56.056057 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-rv8d4"] Feb 16 11:42:56 crc kubenswrapper[4949]: I0216 11:42:56.069018 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-rv8d4"] Feb 16 11:42:57 crc kubenswrapper[4949]: I0216 11:42:57.248250 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0794dfd5-3a8f-4e35-bb3e-2dc32881680d" path="/var/lib/kubelet/pods/0794dfd5-3a8f-4e35-bb3e-2dc32881680d/volumes" Feb 16 11:42:57 crc kubenswrapper[4949]: I0216 11:42:57.341661 4949 scope.go:117] "RemoveContainer" containerID="0710d8646b09c50a7dbd800d1acadd884e58e234ee8e7ac07d53107b4e3acb1c" Feb 16 11:42:57 crc kubenswrapper[4949]: I0216 11:42:57.370789 4949 scope.go:117] "RemoveContainer" containerID="52daeb7bd25242f39a5e1bc367c4ffcc77b2cf612333d7a84f5d303f8e944ce9" Feb 16 11:42:57 crc kubenswrapper[4949]: I0216 11:42:57.441815 4949 scope.go:117] "RemoveContainer" containerID="bbbf95ad18f30db9efb7633d53d034fcb6ac39b560cf6f60e8dbf1a829f95181" Feb 16 11:42:57 crc kubenswrapper[4949]: I0216 11:42:57.499579 4949 scope.go:117] "RemoveContainer" containerID="3c591b2c1c7369149a189d0b2cc914d3fc49e8cb2cac8eaa164a499c20cf9927" Feb 16 11:42:57 crc kubenswrapper[4949]: I0216 11:42:57.554216 4949 scope.go:117] "RemoveContainer" containerID="4b7d10972971dc5c3e73274936a9c4bc317a17e136c3159f6ea5903bc1b364a7" Feb 16 11:42:57 crc kubenswrapper[4949]: I0216 11:42:57.600312 4949 
scope.go:117] "RemoveContainer" containerID="43107383f85e8ca898ba418ec983e30012621aedda886ca3f7b3cda7d3ff5f04" Feb 16 11:42:57 crc kubenswrapper[4949]: I0216 11:42:57.649423 4949 scope.go:117] "RemoveContainer" containerID="ecd9cbd01aaea70701e6bff0939b9a10868831f1e0a00f0120c11b71f9653284" Feb 16 11:43:01 crc kubenswrapper[4949]: E0216 11:43:01.248590 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:43:03 crc kubenswrapper[4949]: E0216 11:43:03.238630 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:43:12 crc kubenswrapper[4949]: I0216 11:43:12.039339 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-bf0e-account-create-update-csrj2"] Feb 16 11:43:12 crc kubenswrapper[4949]: I0216 11:43:12.067452 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-create-sgsmx"] Feb 16 11:43:12 crc kubenswrapper[4949]: I0216 11:43:12.082008 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-bf0e-account-create-update-csrj2"] Feb 16 11:43:12 crc kubenswrapper[4949]: I0216 11:43:12.094137 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-create-sgsmx"] Feb 16 11:43:13 crc kubenswrapper[4949]: I0216 11:43:13.249283 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d70eafc1-c4e9-4f70-bb2c-0a4b7e54bef1" path="/var/lib/kubelet/pods/d70eafc1-c4e9-4f70-bb2c-0a4b7e54bef1/volumes" Feb 16 11:43:13 crc kubenswrapper[4949]: I0216 11:43:13.251406 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ddd1d576-d2cb-4f8b-aa99-9d31cd1d6f50" path="/var/lib/kubelet/pods/ddd1d576-d2cb-4f8b-aa99-9d31cd1d6f50/volumes" Feb 16 11:43:14 crc kubenswrapper[4949]: E0216 11:43:14.240568 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:43:16 crc kubenswrapper[4949]: E0216 11:43:16.238029 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:43:24 crc kubenswrapper[4949]: I0216 11:43:24.071765 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-hm2m6"] Feb 16 11:43:24 crc kubenswrapper[4949]: I0216 11:43:24.083913 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-j9tzp"] Feb 16 11:43:24 crc kubenswrapper[4949]: I0216 11:43:24.095830 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/nova-cell0-cell-mapping-j9tzp"] Feb 16 11:43:24 crc kubenswrapper[4949]: I0216 11:43:24.106273 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-hm2m6"] Feb 16 11:43:25 crc kubenswrapper[4949]: I0216 11:43:25.251446 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6a1e0048-b760-4b8c-a65b-7f0224833721" path="/var/lib/kubelet/pods/6a1e0048-b760-4b8c-a65b-7f0224833721/volumes" Feb 16 11:43:25 crc kubenswrapper[4949]: I0216 11:43:25.252687 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0bd872f-2560-48de-997e-a2ce07f4f0b7" path="/var/lib/kubelet/pods/a0bd872f-2560-48de-997e-a2ce07f4f0b7/volumes" Feb 16 11:43:28 crc kubenswrapper[4949]: E0216 11:43:28.238374 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:43:28 crc kubenswrapper[4949]: E0216 11:43:28.238456 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:43:32 crc kubenswrapper[4949]: I0216 11:43:32.032471 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-sync-8tvgc"] Feb 16 11:43:32 crc kubenswrapper[4949]: I0216 11:43:32.044095 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-sync-8tvgc"] Feb 16 11:43:33 crc kubenswrapper[4949]: I0216 11:43:33.256324 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="167f9139-7451-4312-9b89-ebff291c748a" path="/var/lib/kubelet/pods/167f9139-7451-4312-9b89-ebff291c748a/volumes" Feb 16 11:43:41 crc kubenswrapper[4949]: E0216 11:43:41.247762 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:43:42 crc kubenswrapper[4949]: E0216 11:43:42.237851 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:43:55 crc kubenswrapper[4949]: E0216 11:43:55.239720 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:43:56 crc kubenswrapper[4949]: E0216 11:43:56.237766 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:43:57 crc kubenswrapper[4949]: I0216 11:43:57.887035 4949 scope.go:117] "RemoveContainer" containerID="3c6dcee2df9ec41a1c4f4dd11f0181c87b6dc8ea304f8678fc30f6a1de7689a4" Feb 16 11:43:57 crc kubenswrapper[4949]: I0216 11:43:57.918544 4949 scope.go:117] "RemoveContainer" containerID="6b827f03206bb4c4e85fc8cee05019ee2e5acc8d8bbdbfe9b7315957449e82e1" Feb 16 11:43:57 crc kubenswrapper[4949]: I0216 11:43:57.996516 4949 scope.go:117] "RemoveContainer" containerID="2c69fed30e76ccad4a5e1a07767e49ecbac0b90b62d8597551ceb5e02798567b" Feb 16 11:43:58 crc kubenswrapper[4949]: I0216 11:43:58.055599 4949 scope.go:117] "RemoveContainer" containerID="b787d4f7ac302d60713add31accf46680c357ad5ce0ab8cb98e19dadc1700aaa" Feb 16 11:43:58 crc kubenswrapper[4949]: I0216 11:43:58.108807 4949 scope.go:117] "RemoveContainer" containerID="ba8ab6c66a20430c703e5c48bd3835c082ca95f7daaae276285a64f3466720d9" Feb 16 11:44:03 crc kubenswrapper[4949]: I0216 11:44:03.050379 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-j5mtk"] Feb 16 11:44:03 crc kubenswrapper[4949]: I0216 11:44:03.080855 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-j5mtk"] Feb 16 11:44:03 crc kubenswrapper[4949]: I0216 11:44:03.254674 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6a0bf28e-fc72-402a-b5e9-077eb5218110" path="/var/lib/kubelet/pods/6a0bf28e-fc72-402a-b5e9-077eb5218110/volumes" Feb 16 11:44:08 crc kubenswrapper[4949]: E0216 11:44:08.237811 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:44:10 crc kubenswrapper[4949]: E0216 11:44:10.238477 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:44:22 crc kubenswrapper[4949]: E0216 11:44:22.238472 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:44:23 crc kubenswrapper[4949]: E0216 11:44:23.241282 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:44:34 crc kubenswrapper[4949]: I0216 11:44:34.550296 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Feb 16 11:44:34 crc kubenswrapper[4949]: I0216 11:44:34.552081 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 16 11:44:35 crc kubenswrapper[4949]: E0216 11:44:35.241989 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:44:38 crc kubenswrapper[4949]: E0216 11:44:38.236668 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:44:47 crc kubenswrapper[4949]: E0216 11:44:47.237888 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:44:53 crc kubenswrapper[4949]: E0216 11:44:53.239716 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:44:58 crc kubenswrapper[4949]: I0216 11:44:58.295594 4949 scope.go:117] "RemoveContainer" containerID="c47e71feba59c5aa1f640a9adb819e43ba30466d91d2c60801613210a1e77339" Feb 16 11:45:00 crc kubenswrapper[4949]: I0216 11:45:00.170485 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29520705-qfv6l"] Feb 16 11:45:00 crc kubenswrapper[4949]: E0216 11:45:00.171850 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b03f2e0f-7f53-4fb3-ba61-03bfdaa48739" containerName="extract-content" Feb 16 11:45:00 crc kubenswrapper[4949]: I0216 11:45:00.171876 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="b03f2e0f-7f53-4fb3-ba61-03bfdaa48739" containerName="extract-content" Feb 16 11:45:00 crc kubenswrapper[4949]: E0216 11:45:00.171932 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b03f2e0f-7f53-4fb3-ba61-03bfdaa48739" containerName="extract-utilities" Feb 16 11:45:00 crc kubenswrapper[4949]: I0216 11:45:00.171944 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="b03f2e0f-7f53-4fb3-ba61-03bfdaa48739" containerName="extract-utilities" Feb 16 11:45:00 crc kubenswrapper[4949]: E0216 11:45:00.171962 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3dadb1d-f257-4c56-b5ab-66e7983b4010" containerName="extract-content" Feb 16 11:45:00 crc kubenswrapper[4949]: I0216 11:45:00.171974 4949 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="d3dadb1d-f257-4c56-b5ab-66e7983b4010" containerName="extract-content" Feb 16 11:45:00 crc kubenswrapper[4949]: E0216 11:45:00.171992 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b03f2e0f-7f53-4fb3-ba61-03bfdaa48739" containerName="registry-server" Feb 16 11:45:00 crc kubenswrapper[4949]: I0216 11:45:00.172003 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="b03f2e0f-7f53-4fb3-ba61-03bfdaa48739" containerName="registry-server" Feb 16 11:45:00 crc kubenswrapper[4949]: E0216 11:45:00.172033 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3dadb1d-f257-4c56-b5ab-66e7983b4010" containerName="extract-utilities" Feb 16 11:45:00 crc kubenswrapper[4949]: I0216 11:45:00.172043 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3dadb1d-f257-4c56-b5ab-66e7983b4010" containerName="extract-utilities" Feb 16 11:45:00 crc kubenswrapper[4949]: E0216 11:45:00.172076 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3dadb1d-f257-4c56-b5ab-66e7983b4010" containerName="registry-server" Feb 16 11:45:00 crc kubenswrapper[4949]: I0216 11:45:00.172089 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3dadb1d-f257-4c56-b5ab-66e7983b4010" containerName="registry-server" Feb 16 11:45:00 crc kubenswrapper[4949]: I0216 11:45:00.172562 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="d3dadb1d-f257-4c56-b5ab-66e7983b4010" containerName="registry-server" Feb 16 11:45:00 crc kubenswrapper[4949]: I0216 11:45:00.172628 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="b03f2e0f-7f53-4fb3-ba61-03bfdaa48739" containerName="registry-server" Feb 16 11:45:00 crc kubenswrapper[4949]: I0216 11:45:00.173895 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29520705-qfv6l" Feb 16 11:45:00 crc kubenswrapper[4949]: I0216 11:45:00.182726 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29520705-qfv6l"] Feb 16 11:45:00 crc kubenswrapper[4949]: I0216 11:45:00.185313 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Feb 16 11:45:00 crc kubenswrapper[4949]: I0216 11:45:00.185733 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Feb 16 11:45:00 crc kubenswrapper[4949]: E0216 11:45:00.239610 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:45:00 crc kubenswrapper[4949]: I0216 11:45:00.268940 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/08ecf914-cb28-4f47-a401-fdce7e6a7f52-secret-volume\") pod \"collect-profiles-29520705-qfv6l\" (UID: \"08ecf914-cb28-4f47-a401-fdce7e6a7f52\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520705-qfv6l" Feb 16 11:45:00 crc kubenswrapper[4949]: I0216 11:45:00.269349 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tlwhn\" (UniqueName: \"kubernetes.io/projected/08ecf914-cb28-4f47-a401-fdce7e6a7f52-kube-api-access-tlwhn\") pod \"collect-profiles-29520705-qfv6l\" (UID: \"08ecf914-cb28-4f47-a401-fdce7e6a7f52\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520705-qfv6l" Feb 16 11:45:00 crc kubenswrapper[4949]: I0216 11:45:00.269396 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/08ecf914-cb28-4f47-a401-fdce7e6a7f52-config-volume\") pod \"collect-profiles-29520705-qfv6l\" (UID: \"08ecf914-cb28-4f47-a401-fdce7e6a7f52\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520705-qfv6l" Feb 16 11:45:00 crc kubenswrapper[4949]: I0216 11:45:00.371865 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tlwhn\" (UniqueName: \"kubernetes.io/projected/08ecf914-cb28-4f47-a401-fdce7e6a7f52-kube-api-access-tlwhn\") pod \"collect-profiles-29520705-qfv6l\" (UID: \"08ecf914-cb28-4f47-a401-fdce7e6a7f52\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520705-qfv6l" Feb 16 11:45:00 crc kubenswrapper[4949]: I0216 11:45:00.371927 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/08ecf914-cb28-4f47-a401-fdce7e6a7f52-config-volume\") pod \"collect-profiles-29520705-qfv6l\" (UID: \"08ecf914-cb28-4f47-a401-fdce7e6a7f52\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520705-qfv6l" Feb 16 11:45:00 crc kubenswrapper[4949]: I0216 11:45:00.372114 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/08ecf914-cb28-4f47-a401-fdce7e6a7f52-secret-volume\") pod 
\"collect-profiles-29520705-qfv6l\" (UID: \"08ecf914-cb28-4f47-a401-fdce7e6a7f52\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520705-qfv6l" Feb 16 11:45:00 crc kubenswrapper[4949]: I0216 11:45:00.374083 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/08ecf914-cb28-4f47-a401-fdce7e6a7f52-config-volume\") pod \"collect-profiles-29520705-qfv6l\" (UID: \"08ecf914-cb28-4f47-a401-fdce7e6a7f52\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520705-qfv6l" Feb 16 11:45:00 crc kubenswrapper[4949]: I0216 11:45:00.390035 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/08ecf914-cb28-4f47-a401-fdce7e6a7f52-secret-volume\") pod \"collect-profiles-29520705-qfv6l\" (UID: \"08ecf914-cb28-4f47-a401-fdce7e6a7f52\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520705-qfv6l" Feb 16 11:45:00 crc kubenswrapper[4949]: I0216 11:45:00.390624 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tlwhn\" (UniqueName: \"kubernetes.io/projected/08ecf914-cb28-4f47-a401-fdce7e6a7f52-kube-api-access-tlwhn\") pod \"collect-profiles-29520705-qfv6l\" (UID: \"08ecf914-cb28-4f47-a401-fdce7e6a7f52\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520705-qfv6l" Feb 16 11:45:00 crc kubenswrapper[4949]: I0216 11:45:00.504996 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29520705-qfv6l" Feb 16 11:45:00 crc kubenswrapper[4949]: I0216 11:45:00.979174 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29520705-qfv6l"] Feb 16 11:45:01 crc kubenswrapper[4949]: I0216 11:45:01.328093 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29520705-qfv6l" event={"ID":"08ecf914-cb28-4f47-a401-fdce7e6a7f52","Type":"ContainerStarted","Data":"0939a9289f4a91f76e2feb9b260e3b454a3968a4e2d2938d9f28d3f85b0eb57a"} Feb 16 11:45:01 crc kubenswrapper[4949]: I0216 11:45:01.328510 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29520705-qfv6l" event={"ID":"08ecf914-cb28-4f47-a401-fdce7e6a7f52","Type":"ContainerStarted","Data":"a755058638b2b888f5ddbe6790d1e9a1452b6b29ced01104ad1d207f1b813960"} Feb 16 11:45:01 crc kubenswrapper[4949]: I0216 11:45:01.342973 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29520705-qfv6l" podStartSLOduration=1.342945699 podStartE2EDuration="1.342945699s" podCreationTimestamp="2026-02-16 11:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 11:45:01.342372433 +0000 UTC m=+2290.971706608" watchObservedRunningTime="2026-02-16 11:45:01.342945699 +0000 UTC m=+2290.972279884" Feb 16 11:45:02 crc kubenswrapper[4949]: I0216 11:45:02.348916 4949 generic.go:334] "Generic (PLEG): container finished" podID="08ecf914-cb28-4f47-a401-fdce7e6a7f52" containerID="0939a9289f4a91f76e2feb9b260e3b454a3968a4e2d2938d9f28d3f85b0eb57a" exitCode=0 Feb 16 11:45:02 crc kubenswrapper[4949]: I0216 11:45:02.349034 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-operator-lifecycle-manager/collect-profiles-29520705-qfv6l" event={"ID":"08ecf914-cb28-4f47-a401-fdce7e6a7f52","Type":"ContainerDied","Data":"0939a9289f4a91f76e2feb9b260e3b454a3968a4e2d2938d9f28d3f85b0eb57a"} Feb 16 11:45:03 crc kubenswrapper[4949]: I0216 11:45:03.774206 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29520705-qfv6l" Feb 16 11:45:03 crc kubenswrapper[4949]: I0216 11:45:03.858962 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tlwhn\" (UniqueName: \"kubernetes.io/projected/08ecf914-cb28-4f47-a401-fdce7e6a7f52-kube-api-access-tlwhn\") pod \"08ecf914-cb28-4f47-a401-fdce7e6a7f52\" (UID: \"08ecf914-cb28-4f47-a401-fdce7e6a7f52\") " Feb 16 11:45:03 crc kubenswrapper[4949]: I0216 11:45:03.859411 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/08ecf914-cb28-4f47-a401-fdce7e6a7f52-config-volume\") pod \"08ecf914-cb28-4f47-a401-fdce7e6a7f52\" (UID: \"08ecf914-cb28-4f47-a401-fdce7e6a7f52\") " Feb 16 11:45:03 crc kubenswrapper[4949]: I0216 11:45:03.859572 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/08ecf914-cb28-4f47-a401-fdce7e6a7f52-secret-volume\") pod \"08ecf914-cb28-4f47-a401-fdce7e6a7f52\" (UID: \"08ecf914-cb28-4f47-a401-fdce7e6a7f52\") " Feb 16 11:45:03 crc kubenswrapper[4949]: I0216 11:45:03.859891 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/08ecf914-cb28-4f47-a401-fdce7e6a7f52-config-volume" (OuterVolumeSpecName: "config-volume") pod "08ecf914-cb28-4f47-a401-fdce7e6a7f52" (UID: "08ecf914-cb28-4f47-a401-fdce7e6a7f52"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 11:45:03 crc kubenswrapper[4949]: I0216 11:45:03.860731 4949 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/08ecf914-cb28-4f47-a401-fdce7e6a7f52-config-volume\") on node \"crc\" DevicePath \"\"" Feb 16 11:45:03 crc kubenswrapper[4949]: I0216 11:45:03.867064 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/08ecf914-cb28-4f47-a401-fdce7e6a7f52-kube-api-access-tlwhn" (OuterVolumeSpecName: "kube-api-access-tlwhn") pod "08ecf914-cb28-4f47-a401-fdce7e6a7f52" (UID: "08ecf914-cb28-4f47-a401-fdce7e6a7f52"). InnerVolumeSpecName "kube-api-access-tlwhn". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:45:03 crc kubenswrapper[4949]: I0216 11:45:03.871177 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08ecf914-cb28-4f47-a401-fdce7e6a7f52-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "08ecf914-cb28-4f47-a401-fdce7e6a7f52" (UID: "08ecf914-cb28-4f47-a401-fdce7e6a7f52"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:45:03 crc kubenswrapper[4949]: I0216 11:45:03.963728 4949 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/08ecf914-cb28-4f47-a401-fdce7e6a7f52-secret-volume\") on node \"crc\" DevicePath \"\"" Feb 16 11:45:03 crc kubenswrapper[4949]: I0216 11:45:03.963770 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tlwhn\" (UniqueName: \"kubernetes.io/projected/08ecf914-cb28-4f47-a401-fdce7e6a7f52-kube-api-access-tlwhn\") on node \"crc\" DevicePath \"\"" Feb 16 11:45:04 crc kubenswrapper[4949]: I0216 11:45:04.356160 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29520660-45x59"] Feb 16 11:45:04 crc kubenswrapper[4949]: I0216 11:45:04.368416 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29520660-45x59"] Feb 16 11:45:04 crc kubenswrapper[4949]: I0216 11:45:04.385275 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29520705-qfv6l" event={"ID":"08ecf914-cb28-4f47-a401-fdce7e6a7f52","Type":"ContainerDied","Data":"a755058638b2b888f5ddbe6790d1e9a1452b6b29ced01104ad1d207f1b813960"} Feb 16 11:45:04 crc kubenswrapper[4949]: I0216 11:45:04.385324 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a755058638b2b888f5ddbe6790d1e9a1452b6b29ced01104ad1d207f1b813960" Feb 16 11:45:04 crc kubenswrapper[4949]: I0216 11:45:04.385390 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29520705-qfv6l" Feb 16 11:45:04 crc kubenswrapper[4949]: I0216 11:45:04.550031 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 16 11:45:04 crc kubenswrapper[4949]: I0216 11:45:04.550374 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 16 11:45:05 crc kubenswrapper[4949]: E0216 11:45:05.238683 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:45:05 crc kubenswrapper[4949]: I0216 11:45:05.254266 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dc6af30d-b0c6-47f1-90d2-9d297a7d2b23" path="/var/lib/kubelet/pods/dc6af30d-b0c6-47f1-90d2-9d297a7d2b23/volumes" Feb 16 11:45:12 crc kubenswrapper[4949]: E0216 11:45:12.238789 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" 
podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:45:17 crc kubenswrapper[4949]: E0216 11:45:17.237944 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:45:24 crc kubenswrapper[4949]: E0216 11:45:24.237566 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:45:31 crc kubenswrapper[4949]: E0216 11:45:31.249566 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:45:34 crc kubenswrapper[4949]: I0216 11:45:34.550799 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 16 11:45:34 crc kubenswrapper[4949]: I0216 11:45:34.551263 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 16 11:45:34 crc kubenswrapper[4949]: I0216 11:45:34.551332 4949 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-26lss" Feb 16 11:45:34 crc kubenswrapper[4949]: I0216 11:45:34.552806 4949 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"739a990adef2b23deb9d2adfe3c94bdae210ea27ced100a67c5d19e922815eb4"} pod="openshift-machine-config-operator/machine-config-daemon-26lss" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 16 11:45:34 crc kubenswrapper[4949]: I0216 11:45:34.552884 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" containerID="cri-o://739a990adef2b23deb9d2adfe3c94bdae210ea27ced100a67c5d19e922815eb4" gracePeriod=600 Feb 16 11:45:34 crc kubenswrapper[4949]: I0216 11:45:34.690091 4949 generic.go:334] "Generic (PLEG): container finished" podID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerID="739a990adef2b23deb9d2adfe3c94bdae210ea27ced100a67c5d19e922815eb4" exitCode=0 Feb 16 11:45:34 crc kubenswrapper[4949]: I0216 11:45:34.690233 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" 
event={"ID":"39ca5ab7-457c-4404-a3eb-f6acce74843b","Type":"ContainerDied","Data":"739a990adef2b23deb9d2adfe3c94bdae210ea27ced100a67c5d19e922815eb4"} Feb 16 11:45:34 crc kubenswrapper[4949]: I0216 11:45:34.690472 4949 scope.go:117] "RemoveContainer" containerID="096a296a43c90ecf253548b6f73536cd24c074d6cf2f0b80b69fc989695ff765" Feb 16 11:45:34 crc kubenswrapper[4949]: E0216 11:45:34.697230 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:45:35 crc kubenswrapper[4949]: I0216 11:45:35.705228 4949 scope.go:117] "RemoveContainer" containerID="739a990adef2b23deb9d2adfe3c94bdae210ea27ced100a67c5d19e922815eb4" Feb 16 11:45:35 crc kubenswrapper[4949]: E0216 11:45:35.707249 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:45:36 crc kubenswrapper[4949]: E0216 11:45:36.237542 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:45:46 crc kubenswrapper[4949]: E0216 11:45:46.238566 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:45:48 crc kubenswrapper[4949]: E0216 11:45:48.239666 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:45:49 crc kubenswrapper[4949]: I0216 11:45:49.236485 4949 scope.go:117] "RemoveContainer" containerID="739a990adef2b23deb9d2adfe3c94bdae210ea27ced100a67c5d19e922815eb4" Feb 16 11:45:49 crc kubenswrapper[4949]: E0216 11:45:49.237056 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:45:58 crc kubenswrapper[4949]: I0216 11:45:58.395025 4949 scope.go:117] "RemoveContainer" 
containerID="ced7e0acb5315baa549cbfb4b821348a6f1b8f552adb2f7597fe780b7fe351fa" Feb 16 11:46:00 crc kubenswrapper[4949]: E0216 11:46:00.237992 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:46:03 crc kubenswrapper[4949]: I0216 11:46:03.236385 4949 scope.go:117] "RemoveContainer" containerID="739a990adef2b23deb9d2adfe3c94bdae210ea27ced100a67c5d19e922815eb4" Feb 16 11:46:03 crc kubenswrapper[4949]: E0216 11:46:03.237284 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:46:03 crc kubenswrapper[4949]: E0216 11:46:03.237629 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:46:11 crc kubenswrapper[4949]: I0216 11:46:11.249258 4949 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 16 11:46:11 crc kubenswrapper[4949]: E0216 11:46:11.373447 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 16 11:46:11 crc kubenswrapper[4949]: E0216 11:46:11.373524 4949 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 16 11:46:11 crc kubenswrapper[4949]: E0216 11:46:11.373695 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ksbml,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-5lgds_openstack(a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 16 11:46:11 crc kubenswrapper[4949]: E0216 11:46:11.375034 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:46:14 crc kubenswrapper[4949]: I0216 11:46:14.235461 4949 scope.go:117] "RemoveContainer" containerID="739a990adef2b23deb9d2adfe3c94bdae210ea27ced100a67c5d19e922815eb4" Feb 16 11:46:14 crc kubenswrapper[4949]: E0216 11:46:14.236379 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:46:16 crc kubenswrapper[4949]: I0216 11:46:16.158657 4949 generic.go:334] "Generic (PLEG): container finished" podID="1e96f01e-1b19-4190-9109-75322770d9ba" containerID="257dc4ac92be4a672883f0d062443f3d1aeedd408c80e83b1d9074fe40dc999c" exitCode=2 Feb 16 11:46:16 crc kubenswrapper[4949]: I0216 11:46:16.158726 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-r4kfr" event={"ID":"1e96f01e-1b19-4190-9109-75322770d9ba","Type":"ContainerDied","Data":"257dc4ac92be4a672883f0d062443f3d1aeedd408c80e83b1d9074fe40dc999c"} Feb 16 11:46:16 crc kubenswrapper[4949]: E0216 11:46:16.365565 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 16 11:46:16 crc kubenswrapper[4949]: E0216 11:46:16.365921 4949 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 16 11:46:16 crc kubenswrapper[4949]: E0216 11:46:16.366106 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n674h5dh7bh65bhcch65chc4h547h5d4h5c7h5dch5c8h74hb9h5f4hd8h79h7h59bh559h56bh9bhbch67bh68bh575h5cbh658h5bch7bhcch5d9q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8k7p7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(c69d7379-6f2b-45ae-8972-71e223a337a8): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 16 11:46:16 crc kubenswrapper[4949]: E0216 11:46:16.367344 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:46:17 crc kubenswrapper[4949]: I0216 11:46:17.676277 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-r4kfr" Feb 16 11:46:17 crc kubenswrapper[4949]: I0216 11:46:17.795569 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ztvzv\" (UniqueName: \"kubernetes.io/projected/1e96f01e-1b19-4190-9109-75322770d9ba-kube-api-access-ztvzv\") pod \"1e96f01e-1b19-4190-9109-75322770d9ba\" (UID: \"1e96f01e-1b19-4190-9109-75322770d9ba\") " Feb 16 11:46:17 crc kubenswrapper[4949]: I0216 11:46:17.796732 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1e96f01e-1b19-4190-9109-75322770d9ba-ssh-key-openstack-edpm-ipam\") pod \"1e96f01e-1b19-4190-9109-75322770d9ba\" (UID: \"1e96f01e-1b19-4190-9109-75322770d9ba\") " Feb 16 11:46:17 crc kubenswrapper[4949]: I0216 11:46:17.796773 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1e96f01e-1b19-4190-9109-75322770d9ba-inventory\") pod \"1e96f01e-1b19-4190-9109-75322770d9ba\" (UID: \"1e96f01e-1b19-4190-9109-75322770d9ba\") " Feb 16 11:46:17 crc kubenswrapper[4949]: I0216 11:46:17.831258 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1e96f01e-1b19-4190-9109-75322770d9ba-kube-api-access-ztvzv" (OuterVolumeSpecName: "kube-api-access-ztvzv") pod "1e96f01e-1b19-4190-9109-75322770d9ba" (UID: "1e96f01e-1b19-4190-9109-75322770d9ba"). InnerVolumeSpecName "kube-api-access-ztvzv". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:46:17 crc kubenswrapper[4949]: I0216 11:46:17.844224 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e96f01e-1b19-4190-9109-75322770d9ba-inventory" (OuterVolumeSpecName: "inventory") pod "1e96f01e-1b19-4190-9109-75322770d9ba" (UID: "1e96f01e-1b19-4190-9109-75322770d9ba"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:46:17 crc kubenswrapper[4949]: I0216 11:46:17.868742 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e96f01e-1b19-4190-9109-75322770d9ba-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "1e96f01e-1b19-4190-9109-75322770d9ba" (UID: "1e96f01e-1b19-4190-9109-75322770d9ba"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
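
Back in the machine-config-daemon entries, "back-off 5m0s restarting failed container" means the restart back-off has reached its cap: the kubelet starts at 10s, doubles the delay after each failed restart, caps it at five minutes, and resets only after the container runs cleanly for ten minutes. The RemoveContainer/CrashLoopBackOff pairs repeating every ten seconds or so are pod-worker syncs observing that cap, not additional restarts. A sketch of the documented doubling (not the kubelet's actual implementation):

    # Kubelet-style restart back-off: 10s initial, doubled per failure, 5m cap.
    def backoff_schedule(failures: int, base: float = 10.0, cap: float = 300.0):
        return [min(base * 2**i, cap) for i in range(failures)]

    print(backoff_schedule(7))
    # [10.0, 20.0, 40.0, 80.0, 160.0, 300.0, 300.0] -> at the cap by the sixth failure
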
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:46:17 crc kubenswrapper[4949]: I0216 11:46:17.902185 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ztvzv\" (UniqueName: \"kubernetes.io/projected/1e96f01e-1b19-4190-9109-75322770d9ba-kube-api-access-ztvzv\") on node \"crc\" DevicePath \"\"" Feb 16 11:46:17 crc kubenswrapper[4949]: I0216 11:46:17.902224 4949 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1e96f01e-1b19-4190-9109-75322770d9ba-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Feb 16 11:46:17 crc kubenswrapper[4949]: I0216 11:46:17.902235 4949 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1e96f01e-1b19-4190-9109-75322770d9ba-inventory\") on node \"crc\" DevicePath \"\"" Feb 16 11:46:18 crc kubenswrapper[4949]: I0216 11:46:18.191281 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-r4kfr" event={"ID":"1e96f01e-1b19-4190-9109-75322770d9ba","Type":"ContainerDied","Data":"d46e0c06b6805b71fed5880f4413c9de8516ce80cec348269db37f6b4da9c969"} Feb 16 11:46:18 crc kubenswrapper[4949]: I0216 11:46:18.191331 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-r4kfr" Feb 16 11:46:18 crc kubenswrapper[4949]: I0216 11:46:18.191331 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d46e0c06b6805b71fed5880f4413c9de8516ce80cec348269db37f6b4da9c969" Feb 16 11:46:25 crc kubenswrapper[4949]: I0216 11:46:25.062380 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-q89fn"] Feb 16 11:46:25 crc kubenswrapper[4949]: E0216 11:46:25.063270 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08ecf914-cb28-4f47-a401-fdce7e6a7f52" containerName="collect-profiles" Feb 16 11:46:25 crc kubenswrapper[4949]: I0216 11:46:25.063283 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="08ecf914-cb28-4f47-a401-fdce7e6a7f52" containerName="collect-profiles" Feb 16 11:46:25 crc kubenswrapper[4949]: E0216 11:46:25.063317 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e96f01e-1b19-4190-9109-75322770d9ba" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Feb 16 11:46:25 crc kubenswrapper[4949]: I0216 11:46:25.063324 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e96f01e-1b19-4190-9109-75322770d9ba" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Feb 16 11:46:25 crc kubenswrapper[4949]: I0216 11:46:25.063515 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e96f01e-1b19-4190-9109-75322770d9ba" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Feb 16 11:46:25 crc kubenswrapper[4949]: I0216 11:46:25.063541 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="08ecf914-cb28-4f47-a401-fdce7e6a7f52" containerName="collect-profiles" Feb 16 11:46:25 crc kubenswrapper[4949]: I0216 11:46:25.064374 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-q89fn" Feb 16 11:46:25 crc kubenswrapper[4949]: I0216 11:46:25.071604 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Feb 16 11:46:25 crc kubenswrapper[4949]: I0216 11:46:25.072070 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Feb 16 11:46:25 crc kubenswrapper[4949]: I0216 11:46:25.072553 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-g89vn" Feb 16 11:46:25 crc kubenswrapper[4949]: I0216 11:46:25.079540 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Feb 16 11:46:25 crc kubenswrapper[4949]: I0216 11:46:25.109188 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-q89fn"] Feb 16 11:46:25 crc kubenswrapper[4949]: I0216 11:46:25.223480 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5cd889af-3e25-4f40-bebf-f1861b7549ed-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-q89fn\" (UID: \"5cd889af-3e25-4f40-bebf-f1861b7549ed\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-q89fn" Feb 16 11:46:25 crc kubenswrapper[4949]: I0216 11:46:25.223602 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jl2pq\" (UniqueName: \"kubernetes.io/projected/5cd889af-3e25-4f40-bebf-f1861b7549ed-kube-api-access-jl2pq\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-q89fn\" (UID: \"5cd889af-3e25-4f40-bebf-f1861b7549ed\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-q89fn" Feb 16 11:46:25 crc kubenswrapper[4949]: I0216 11:46:25.223642 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/5cd889af-3e25-4f40-bebf-f1861b7549ed-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-q89fn\" (UID: \"5cd889af-3e25-4f40-bebf-f1861b7549ed\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-q89fn" Feb 16 11:46:25 crc kubenswrapper[4949]: I0216 11:46:25.235634 4949 scope.go:117] "RemoveContainer" containerID="739a990adef2b23deb9d2adfe3c94bdae210ea27ced100a67c5d19e922815eb4" Feb 16 11:46:25 crc kubenswrapper[4949]: E0216 11:46:25.235932 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:46:25 crc kubenswrapper[4949]: I0216 11:46:25.326223 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5cd889af-3e25-4f40-bebf-f1861b7549ed-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-q89fn\" (UID: \"5cd889af-3e25-4f40-bebf-f1861b7549ed\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-q89fn" Feb 16 11:46:25 crc 
kubenswrapper[4949]: I0216 11:46:25.326296 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jl2pq\" (UniqueName: \"kubernetes.io/projected/5cd889af-3e25-4f40-bebf-f1861b7549ed-kube-api-access-jl2pq\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-q89fn\" (UID: \"5cd889af-3e25-4f40-bebf-f1861b7549ed\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-q89fn" Feb 16 11:46:25 crc kubenswrapper[4949]: I0216 11:46:25.326327 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/5cd889af-3e25-4f40-bebf-f1861b7549ed-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-q89fn\" (UID: \"5cd889af-3e25-4f40-bebf-f1861b7549ed\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-q89fn" Feb 16 11:46:25 crc kubenswrapper[4949]: I0216 11:46:25.332970 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/5cd889af-3e25-4f40-bebf-f1861b7549ed-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-q89fn\" (UID: \"5cd889af-3e25-4f40-bebf-f1861b7549ed\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-q89fn" Feb 16 11:46:25 crc kubenswrapper[4949]: I0216 11:46:25.379884 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5cd889af-3e25-4f40-bebf-f1861b7549ed-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-q89fn\" (UID: \"5cd889af-3e25-4f40-bebf-f1861b7549ed\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-q89fn" Feb 16 11:46:25 crc kubenswrapper[4949]: I0216 11:46:25.394863 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jl2pq\" (UniqueName: \"kubernetes.io/projected/5cd889af-3e25-4f40-bebf-f1861b7549ed-kube-api-access-jl2pq\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-q89fn\" (UID: \"5cd889af-3e25-4f40-bebf-f1861b7549ed\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-q89fn" Feb 16 11:46:25 crc kubenswrapper[4949]: I0216 11:46:25.408080 4949 util.go:30] "No sandbox for pod can be found. 
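
While the replacement download-cache pod mounts its volumes cleanly, heat-db-sync-5lgds and ceilometer-0 keep cycling through ImagePullBackOff in the background. The same picture is available from the API without grepping the journal; a sketch with the kubernetes Python client, assuming a reachable kubeconfig:

    from kubernetes import client, config

    config.load_kube_config()  # or load_incluster_config() inside a pod
    v1 = client.CoreV1Api()

    # Print every waiting container in the openstack namespace with its reason;
    # this surfaces the ImagePullBackOff pods seen throughout this log.
    for pod in v1.list_namespaced_pod("openstack").items:
        for cs in pod.status.container_statuses or []:
            if cs.state.waiting:
                print(pod.metadata.name, cs.name, cs.state.waiting.reason)
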
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-q89fn" Feb 16 11:46:26 crc kubenswrapper[4949]: I0216 11:46:26.022863 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-q89fn"] Feb 16 11:46:26 crc kubenswrapper[4949]: E0216 11:46:26.236729 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:46:26 crc kubenswrapper[4949]: I0216 11:46:26.292288 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-q89fn" event={"ID":"5cd889af-3e25-4f40-bebf-f1861b7549ed","Type":"ContainerStarted","Data":"ec51c0e907b2330c38fd4df00da8a964f1fa446f727d5b5a14d5cc9a87556c15"} Feb 16 11:46:27 crc kubenswrapper[4949]: I0216 11:46:27.306010 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-q89fn" event={"ID":"5cd889af-3e25-4f40-bebf-f1861b7549ed","Type":"ContainerStarted","Data":"c281e92f7db7c4d1777a52e5e19367494fe19b25df7fb896a02846fe57f35652"} Feb 16 11:46:27 crc kubenswrapper[4949]: I0216 11:46:27.342839 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-q89fn" podStartSLOduration=1.769741785 podStartE2EDuration="2.342815581s" podCreationTimestamp="2026-02-16 11:46:25 +0000 UTC" firstStartedPulling="2026-02-16 11:46:26.033085156 +0000 UTC m=+2375.662419321" lastFinishedPulling="2026-02-16 11:46:26.606158952 +0000 UTC m=+2376.235493117" observedRunningTime="2026-02-16 11:46:27.335605736 +0000 UTC m=+2376.964939911" watchObservedRunningTime="2026-02-16 11:46:27.342815581 +0000 UTC m=+2376.972149746" Feb 16 11:46:29 crc kubenswrapper[4949]: E0216 11:46:29.238994 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:46:36 crc kubenswrapper[4949]: I0216 11:46:36.235152 4949 scope.go:117] "RemoveContainer" containerID="739a990adef2b23deb9d2adfe3c94bdae210ea27ced100a67c5d19e922815eb4" Feb 16 11:46:36 crc kubenswrapper[4949]: E0216 11:46:36.235879 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:46:39 crc kubenswrapper[4949]: E0216 11:46:39.237481 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:46:43 crc kubenswrapper[4949]: E0216 
Feb 16 11:46:43 crc kubenswrapper[4949]: E0216 11:46:43.240142 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 11:46:47 crc kubenswrapper[4949]: I0216 11:46:47.243007 4949 scope.go:117] "RemoveContainer" containerID="739a990adef2b23deb9d2adfe3c94bdae210ea27ced100a67c5d19e922815eb4"
Feb 16 11:46:47 crc kubenswrapper[4949]: E0216 11:46:47.243701 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b"
Feb 16 11:46:53 crc kubenswrapper[4949]: E0216 11:46:53.238811 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 11:46:56 crc kubenswrapper[4949]: E0216 11:46:56.238081 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 11:46:59 crc kubenswrapper[4949]: I0216 11:46:59.235913 4949 scope.go:117] "RemoveContainer" containerID="739a990adef2b23deb9d2adfe3c94bdae210ea27ced100a67c5d19e922815eb4"
Feb 16 11:46:59 crc kubenswrapper[4949]: E0216 11:46:59.236616 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b"
Feb 16 11:47:05 crc kubenswrapper[4949]: E0216 11:47:05.237599 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 11:47:08 crc kubenswrapper[4949]: E0216 11:47:08.238566 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 11:47:11 crc kubenswrapper[4949]: I0216 11:47:11.243086 4949 scope.go:117] "RemoveContainer" containerID="739a990adef2b23deb9d2adfe3c94bdae210ea27ced100a67c5d19e922815eb4"
Feb 16 11:47:11 crc kubenswrapper[4949]: E0216 11:47:11.244213 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b"
Feb 16 11:47:17 crc kubenswrapper[4949]: I0216 11:47:17.068510 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-djv9t"]
Feb 16 11:47:17 crc kubenswrapper[4949]: I0216 11:47:17.071860 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-djv9t"
Feb 16 11:47:17 crc kubenswrapper[4949]: I0216 11:47:17.081509 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-djv9t"]
Feb 16 11:47:17 crc kubenswrapper[4949]: I0216 11:47:17.269082 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36140df8-0584-42f3-8dd2-72ae078e0b77-utilities\") pod \"redhat-marketplace-djv9t\" (UID: \"36140df8-0584-42f3-8dd2-72ae078e0b77\") " pod="openshift-marketplace/redhat-marketplace-djv9t"
Feb 16 11:47:17 crc kubenswrapper[4949]: I0216 11:47:17.269227 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36140df8-0584-42f3-8dd2-72ae078e0b77-catalog-content\") pod \"redhat-marketplace-djv9t\" (UID: \"36140df8-0584-42f3-8dd2-72ae078e0b77\") " pod="openshift-marketplace/redhat-marketplace-djv9t"
Feb 16 11:47:17 crc kubenswrapper[4949]: I0216 11:47:17.269733 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-65k49\" (UniqueName: \"kubernetes.io/projected/36140df8-0584-42f3-8dd2-72ae078e0b77-kube-api-access-65k49\") pod \"redhat-marketplace-djv9t\" (UID: \"36140df8-0584-42f3-8dd2-72ae078e0b77\") " pod="openshift-marketplace/redhat-marketplace-djv9t"
Feb 16 11:47:17 crc kubenswrapper[4949]: I0216 11:47:17.372421 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-65k49\" (UniqueName: \"kubernetes.io/projected/36140df8-0584-42f3-8dd2-72ae078e0b77-kube-api-access-65k49\") pod \"redhat-marketplace-djv9t\" (UID: \"36140df8-0584-42f3-8dd2-72ae078e0b77\") " pod="openshift-marketplace/redhat-marketplace-djv9t"
Feb 16 11:47:17 crc kubenswrapper[4949]: I0216 11:47:17.372566 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36140df8-0584-42f3-8dd2-72ae078e0b77-utilities\") pod \"redhat-marketplace-djv9t\" (UID: \"36140df8-0584-42f3-8dd2-72ae078e0b77\") " pod="openshift-marketplace/redhat-marketplace-djv9t"
Feb 16 11:47:17 crc kubenswrapper[4949]: I0216 11:47:17.372697 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36140df8-0584-42f3-8dd2-72ae078e0b77-catalog-content\") pod \"redhat-marketplace-djv9t\" (UID: \"36140df8-0584-42f3-8dd2-72ae078e0b77\") " pod="openshift-marketplace/redhat-marketplace-djv9t"
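The reconciler entries above show the kubelet's volume pipeline for redhat-marketplace-djv9t: VerifyControllerAttachedVolume confirms each volume is available to the node, then MountVolume.SetUp mounts the two emptyDir volumes (utilities, catalog-content) and the projected service-account token (kube-api-access-65k49). A quick way to correlate these names with the pod spec, sketched here on the assumption that the pod still exists and oc is logged in to the cluster:

    # Volume names as the API server reports them; they should match the
    # UniqueName suffixes in the reconciler_common.go entries above.
    oc -n openshift-marketplace get pod redhat-marketplace-djv9t \
        -o jsonpath='{range .spec.volumes[*]}{.name}{"\n"}{end}'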
\"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36140df8-0584-42f3-8dd2-72ae078e0b77-utilities\") pod \"redhat-marketplace-djv9t\" (UID: \"36140df8-0584-42f3-8dd2-72ae078e0b77\") " pod="openshift-marketplace/redhat-marketplace-djv9t" Feb 16 11:47:17 crc kubenswrapper[4949]: I0216 11:47:17.373351 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36140df8-0584-42f3-8dd2-72ae078e0b77-catalog-content\") pod \"redhat-marketplace-djv9t\" (UID: \"36140df8-0584-42f3-8dd2-72ae078e0b77\") " pod="openshift-marketplace/redhat-marketplace-djv9t" Feb 16 11:47:17 crc kubenswrapper[4949]: I0216 11:47:17.394065 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-65k49\" (UniqueName: \"kubernetes.io/projected/36140df8-0584-42f3-8dd2-72ae078e0b77-kube-api-access-65k49\") pod \"redhat-marketplace-djv9t\" (UID: \"36140df8-0584-42f3-8dd2-72ae078e0b77\") " pod="openshift-marketplace/redhat-marketplace-djv9t" Feb 16 11:47:17 crc kubenswrapper[4949]: I0216 11:47:17.402990 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-djv9t" Feb 16 11:47:17 crc kubenswrapper[4949]: I0216 11:47:17.946836 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-djv9t"] Feb 16 11:47:18 crc kubenswrapper[4949]: I0216 11:47:18.902083 4949 generic.go:334] "Generic (PLEG): container finished" podID="36140df8-0584-42f3-8dd2-72ae078e0b77" containerID="d37e4e662993bd28344885b2a0a59742901a65de371f7cfb8d6418226704e36d" exitCode=0 Feb 16 11:47:18 crc kubenswrapper[4949]: I0216 11:47:18.902305 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-djv9t" event={"ID":"36140df8-0584-42f3-8dd2-72ae078e0b77","Type":"ContainerDied","Data":"d37e4e662993bd28344885b2a0a59742901a65de371f7cfb8d6418226704e36d"} Feb 16 11:47:18 crc kubenswrapper[4949]: I0216 11:47:18.902447 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-djv9t" event={"ID":"36140df8-0584-42f3-8dd2-72ae078e0b77","Type":"ContainerStarted","Data":"86c90afd7fe0e59123f923388cdf0d95b09fc52560ea5f66addd05e386cbebbc"} Feb 16 11:47:19 crc kubenswrapper[4949]: E0216 11:47:19.238207 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:47:19 crc kubenswrapper[4949]: I0216 11:47:19.914637 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-djv9t" event={"ID":"36140df8-0584-42f3-8dd2-72ae078e0b77","Type":"ContainerStarted","Data":"3b3188a3194e62d93e1de8ffc1734145c66f35c67c1e5180e8b7dae4fc5be411"} Feb 16 11:47:20 crc kubenswrapper[4949]: I0216 11:47:20.929944 4949 generic.go:334] "Generic (PLEG): container finished" podID="36140df8-0584-42f3-8dd2-72ae078e0b77" containerID="3b3188a3194e62d93e1de8ffc1734145c66f35c67c1e5180e8b7dae4fc5be411" exitCode=0 Feb 16 11:47:20 crc kubenswrapper[4949]: I0216 11:47:20.930086 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-djv9t" 
event={"ID":"36140df8-0584-42f3-8dd2-72ae078e0b77","Type":"ContainerDied","Data":"3b3188a3194e62d93e1de8ffc1734145c66f35c67c1e5180e8b7dae4fc5be411"} Feb 16 11:47:21 crc kubenswrapper[4949]: E0216 11:47:21.263353 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:47:21 crc kubenswrapper[4949]: I0216 11:47:21.943428 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-djv9t" event={"ID":"36140df8-0584-42f3-8dd2-72ae078e0b77","Type":"ContainerStarted","Data":"d4444c0a4a70430eca78389ca6a4106518b7f0800742cfcfd98dd3b100b8ac9d"} Feb 16 11:47:21 crc kubenswrapper[4949]: I0216 11:47:21.975785 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-djv9t" podStartSLOduration=2.4958384909999998 podStartE2EDuration="4.975763754s" podCreationTimestamp="2026-02-16 11:47:17 +0000 UTC" firstStartedPulling="2026-02-16 11:47:18.904385482 +0000 UTC m=+2428.533719667" lastFinishedPulling="2026-02-16 11:47:21.384310765 +0000 UTC m=+2431.013644930" observedRunningTime="2026-02-16 11:47:21.973971983 +0000 UTC m=+2431.603306148" watchObservedRunningTime="2026-02-16 11:47:21.975763754 +0000 UTC m=+2431.605097929" Feb 16 11:47:24 crc kubenswrapper[4949]: I0216 11:47:24.235884 4949 scope.go:117] "RemoveContainer" containerID="739a990adef2b23deb9d2adfe3c94bdae210ea27ced100a67c5d19e922815eb4" Feb 16 11:47:24 crc kubenswrapper[4949]: E0216 11:47:24.236671 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:47:27 crc kubenswrapper[4949]: I0216 11:47:27.404234 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-djv9t" Feb 16 11:47:27 crc kubenswrapper[4949]: I0216 11:47:27.404841 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-djv9t" Feb 16 11:47:27 crc kubenswrapper[4949]: I0216 11:47:27.504734 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-djv9t" Feb 16 11:47:28 crc kubenswrapper[4949]: I0216 11:47:28.071821 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-djv9t" Feb 16 11:47:28 crc kubenswrapper[4949]: I0216 11:47:28.136499 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-djv9t"] Feb 16 11:47:30 crc kubenswrapper[4949]: I0216 11:47:30.038535 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-djv9t" podUID="36140df8-0584-42f3-8dd2-72ae078e0b77" containerName="registry-server" containerID="cri-o://d4444c0a4a70430eca78389ca6a4106518b7f0800742cfcfd98dd3b100b8ac9d" gracePeriod=2 Feb 16 11:47:30 crc kubenswrapper[4949]: I0216 
11:47:30.636162 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-djv9t" Feb 16 11:47:30 crc kubenswrapper[4949]: I0216 11:47:30.752742 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36140df8-0584-42f3-8dd2-72ae078e0b77-utilities\") pod \"36140df8-0584-42f3-8dd2-72ae078e0b77\" (UID: \"36140df8-0584-42f3-8dd2-72ae078e0b77\") " Feb 16 11:47:30 crc kubenswrapper[4949]: I0216 11:47:30.753099 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-65k49\" (UniqueName: \"kubernetes.io/projected/36140df8-0584-42f3-8dd2-72ae078e0b77-kube-api-access-65k49\") pod \"36140df8-0584-42f3-8dd2-72ae078e0b77\" (UID: \"36140df8-0584-42f3-8dd2-72ae078e0b77\") " Feb 16 11:47:30 crc kubenswrapper[4949]: I0216 11:47:30.753246 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36140df8-0584-42f3-8dd2-72ae078e0b77-catalog-content\") pod \"36140df8-0584-42f3-8dd2-72ae078e0b77\" (UID: \"36140df8-0584-42f3-8dd2-72ae078e0b77\") " Feb 16 11:47:30 crc kubenswrapper[4949]: I0216 11:47:30.753814 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/36140df8-0584-42f3-8dd2-72ae078e0b77-utilities" (OuterVolumeSpecName: "utilities") pod "36140df8-0584-42f3-8dd2-72ae078e0b77" (UID: "36140df8-0584-42f3-8dd2-72ae078e0b77"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:47:30 crc kubenswrapper[4949]: I0216 11:47:30.754708 4949 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36140df8-0584-42f3-8dd2-72ae078e0b77-utilities\") on node \"crc\" DevicePath \"\"" Feb 16 11:47:30 crc kubenswrapper[4949]: I0216 11:47:30.760191 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/36140df8-0584-42f3-8dd2-72ae078e0b77-kube-api-access-65k49" (OuterVolumeSpecName: "kube-api-access-65k49") pod "36140df8-0584-42f3-8dd2-72ae078e0b77" (UID: "36140df8-0584-42f3-8dd2-72ae078e0b77"). InnerVolumeSpecName "kube-api-access-65k49". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:47:30 crc kubenswrapper[4949]: I0216 11:47:30.798277 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/36140df8-0584-42f3-8dd2-72ae078e0b77-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "36140df8-0584-42f3-8dd2-72ae078e0b77" (UID: "36140df8-0584-42f3-8dd2-72ae078e0b77"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:47:30 crc kubenswrapper[4949]: I0216 11:47:30.857120 4949 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36140df8-0584-42f3-8dd2-72ae078e0b77-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 16 11:47:30 crc kubenswrapper[4949]: I0216 11:47:30.857192 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-65k49\" (UniqueName: \"kubernetes.io/projected/36140df8-0584-42f3-8dd2-72ae078e0b77-kube-api-access-65k49\") on node \"crc\" DevicePath \"\"" Feb 16 11:47:31 crc kubenswrapper[4949]: I0216 11:47:31.054201 4949 generic.go:334] "Generic (PLEG): container finished" podID="36140df8-0584-42f3-8dd2-72ae078e0b77" containerID="d4444c0a4a70430eca78389ca6a4106518b7f0800742cfcfd98dd3b100b8ac9d" exitCode=0 Feb 16 11:47:31 crc kubenswrapper[4949]: I0216 11:47:31.054252 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-djv9t" event={"ID":"36140df8-0584-42f3-8dd2-72ae078e0b77","Type":"ContainerDied","Data":"d4444c0a4a70430eca78389ca6a4106518b7f0800742cfcfd98dd3b100b8ac9d"} Feb 16 11:47:31 crc kubenswrapper[4949]: I0216 11:47:31.054282 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-djv9t" event={"ID":"36140df8-0584-42f3-8dd2-72ae078e0b77","Type":"ContainerDied","Data":"86c90afd7fe0e59123f923388cdf0d95b09fc52560ea5f66addd05e386cbebbc"} Feb 16 11:47:31 crc kubenswrapper[4949]: I0216 11:47:31.054306 4949 scope.go:117] "RemoveContainer" containerID="d4444c0a4a70430eca78389ca6a4106518b7f0800742cfcfd98dd3b100b8ac9d" Feb 16 11:47:31 crc kubenswrapper[4949]: I0216 11:47:31.054506 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-djv9t" Feb 16 11:47:31 crc kubenswrapper[4949]: I0216 11:47:31.102505 4949 scope.go:117] "RemoveContainer" containerID="3b3188a3194e62d93e1de8ffc1734145c66f35c67c1e5180e8b7dae4fc5be411" Feb 16 11:47:31 crc kubenswrapper[4949]: I0216 11:47:31.103795 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-djv9t"] Feb 16 11:47:31 crc kubenswrapper[4949]: I0216 11:47:31.116667 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-djv9t"] Feb 16 11:47:31 crc kubenswrapper[4949]: I0216 11:47:31.138886 4949 scope.go:117] "RemoveContainer" containerID="d37e4e662993bd28344885b2a0a59742901a65de371f7cfb8d6418226704e36d" Feb 16 11:47:31 crc kubenswrapper[4949]: I0216 11:47:31.215314 4949 scope.go:117] "RemoveContainer" containerID="d4444c0a4a70430eca78389ca6a4106518b7f0800742cfcfd98dd3b100b8ac9d" Feb 16 11:47:31 crc kubenswrapper[4949]: E0216 11:47:31.215920 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d4444c0a4a70430eca78389ca6a4106518b7f0800742cfcfd98dd3b100b8ac9d\": container with ID starting with d4444c0a4a70430eca78389ca6a4106518b7f0800742cfcfd98dd3b100b8ac9d not found: ID does not exist" containerID="d4444c0a4a70430eca78389ca6a4106518b7f0800742cfcfd98dd3b100b8ac9d" Feb 16 11:47:31 crc kubenswrapper[4949]: I0216 11:47:31.215952 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d4444c0a4a70430eca78389ca6a4106518b7f0800742cfcfd98dd3b100b8ac9d"} err="failed to get container status \"d4444c0a4a70430eca78389ca6a4106518b7f0800742cfcfd98dd3b100b8ac9d\": rpc error: code = NotFound desc = could not find container \"d4444c0a4a70430eca78389ca6a4106518b7f0800742cfcfd98dd3b100b8ac9d\": container with ID starting with d4444c0a4a70430eca78389ca6a4106518b7f0800742cfcfd98dd3b100b8ac9d not found: ID does not exist" Feb 16 11:47:31 crc kubenswrapper[4949]: I0216 11:47:31.215972 4949 scope.go:117] "RemoveContainer" containerID="3b3188a3194e62d93e1de8ffc1734145c66f35c67c1e5180e8b7dae4fc5be411" Feb 16 11:47:31 crc kubenswrapper[4949]: E0216 11:47:31.216462 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3b3188a3194e62d93e1de8ffc1734145c66f35c67c1e5180e8b7dae4fc5be411\": container with ID starting with 3b3188a3194e62d93e1de8ffc1734145c66f35c67c1e5180e8b7dae4fc5be411 not found: ID does not exist" containerID="3b3188a3194e62d93e1de8ffc1734145c66f35c67c1e5180e8b7dae4fc5be411" Feb 16 11:47:31 crc kubenswrapper[4949]: I0216 11:47:31.216488 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3b3188a3194e62d93e1de8ffc1734145c66f35c67c1e5180e8b7dae4fc5be411"} err="failed to get container status \"3b3188a3194e62d93e1de8ffc1734145c66f35c67c1e5180e8b7dae4fc5be411\": rpc error: code = NotFound desc = could not find container \"3b3188a3194e62d93e1de8ffc1734145c66f35c67c1e5180e8b7dae4fc5be411\": container with ID starting with 3b3188a3194e62d93e1de8ffc1734145c66f35c67c1e5180e8b7dae4fc5be411 not found: ID does not exist" Feb 16 11:47:31 crc kubenswrapper[4949]: I0216 11:47:31.216501 4949 scope.go:117] "RemoveContainer" containerID="d37e4e662993bd28344885b2a0a59742901a65de371f7cfb8d6418226704e36d" Feb 16 11:47:31 crc kubenswrapper[4949]: E0216 11:47:31.216937 4949 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"d37e4e662993bd28344885b2a0a59742901a65de371f7cfb8d6418226704e36d\": container with ID starting with d37e4e662993bd28344885b2a0a59742901a65de371f7cfb8d6418226704e36d not found: ID does not exist" containerID="d37e4e662993bd28344885b2a0a59742901a65de371f7cfb8d6418226704e36d" Feb 16 11:47:31 crc kubenswrapper[4949]: I0216 11:47:31.217000 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d37e4e662993bd28344885b2a0a59742901a65de371f7cfb8d6418226704e36d"} err="failed to get container status \"d37e4e662993bd28344885b2a0a59742901a65de371f7cfb8d6418226704e36d\": rpc error: code = NotFound desc = could not find container \"d37e4e662993bd28344885b2a0a59742901a65de371f7cfb8d6418226704e36d\": container with ID starting with d37e4e662993bd28344885b2a0a59742901a65de371f7cfb8d6418226704e36d not found: ID does not exist" Feb 16 11:47:31 crc kubenswrapper[4949]: I0216 11:47:31.255513 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="36140df8-0584-42f3-8dd2-72ae078e0b77" path="/var/lib/kubelet/pods/36140df8-0584-42f3-8dd2-72ae078e0b77/volumes" Feb 16 11:47:32 crc kubenswrapper[4949]: E0216 11:47:32.238346 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:47:33 crc kubenswrapper[4949]: E0216 11:47:33.253450 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:47:35 crc kubenswrapper[4949]: I0216 11:47:35.236285 4949 scope.go:117] "RemoveContainer" containerID="739a990adef2b23deb9d2adfe3c94bdae210ea27ced100a67c5d19e922815eb4" Feb 16 11:47:35 crc kubenswrapper[4949]: E0216 11:47:35.237246 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:47:46 crc kubenswrapper[4949]: E0216 11:47:46.240606 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:47:48 crc kubenswrapper[4949]: I0216 11:47:48.235384 4949 scope.go:117] "RemoveContainer" containerID="739a990adef2b23deb9d2adfe3c94bdae210ea27ced100a67c5d19e922815eb4" Feb 16 11:47:48 crc kubenswrapper[4949]: E0216 11:47:48.235992 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:47:48 crc kubenswrapper[4949]: E0216 11:47:48.237142 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:48:00 crc kubenswrapper[4949]: I0216 11:48:00.236345 4949 scope.go:117] "RemoveContainer" containerID="739a990adef2b23deb9d2adfe3c94bdae210ea27ced100a67c5d19e922815eb4" Feb 16 11:48:00 crc kubenswrapper[4949]: E0216 11:48:00.237537 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:48:01 crc kubenswrapper[4949]: E0216 11:48:01.246089 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:48:02 crc kubenswrapper[4949]: E0216 11:48:02.238065 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:48:13 crc kubenswrapper[4949]: E0216 11:48:13.237474 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:48:13 crc kubenswrapper[4949]: E0216 11:48:13.237532 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:48:15 crc kubenswrapper[4949]: I0216 11:48:15.236706 4949 scope.go:117] "RemoveContainer" containerID="739a990adef2b23deb9d2adfe3c94bdae210ea27ced100a67c5d19e922815eb4" Feb 16 11:48:15 crc kubenswrapper[4949]: E0216 11:48:15.237707 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" 
podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:48:24 crc kubenswrapper[4949]: E0216 11:48:24.239922 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:48:27 crc kubenswrapper[4949]: E0216 11:48:27.238068 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:48:29 crc kubenswrapper[4949]: I0216 11:48:29.235857 4949 scope.go:117] "RemoveContainer" containerID="739a990adef2b23deb9d2adfe3c94bdae210ea27ced100a67c5d19e922815eb4" Feb 16 11:48:29 crc kubenswrapper[4949]: E0216 11:48:29.236385 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:48:37 crc kubenswrapper[4949]: E0216 11:48:37.240564 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:48:38 crc kubenswrapper[4949]: E0216 11:48:38.238092 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:48:41 crc kubenswrapper[4949]: I0216 11:48:41.249075 4949 scope.go:117] "RemoveContainer" containerID="739a990adef2b23deb9d2adfe3c94bdae210ea27ced100a67c5d19e922815eb4" Feb 16 11:48:41 crc kubenswrapper[4949]: E0216 11:48:41.250132 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:48:50 crc kubenswrapper[4949]: E0216 11:48:50.238224 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:48:51 crc kubenswrapper[4949]: E0216 11:48:51.264045 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:48:52 crc kubenswrapper[4949]: I0216 11:48:52.235869 4949 scope.go:117] "RemoveContainer" containerID="739a990adef2b23deb9d2adfe3c94bdae210ea27ced100a67c5d19e922815eb4" Feb 16 11:48:52 crc kubenswrapper[4949]: E0216 11:48:52.236705 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:49:02 crc kubenswrapper[4949]: E0216 11:49:02.239674 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:49:06 crc kubenswrapper[4949]: E0216 11:49:06.240959 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:49:07 crc kubenswrapper[4949]: I0216 11:49:07.236435 4949 scope.go:117] "RemoveContainer" containerID="739a990adef2b23deb9d2adfe3c94bdae210ea27ced100a67c5d19e922815eb4" Feb 16 11:49:07 crc kubenswrapper[4949]: E0216 11:49:07.237007 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:49:16 crc kubenswrapper[4949]: E0216 11:49:16.237809 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:49:17 crc kubenswrapper[4949]: E0216 11:49:17.237801 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:49:20 crc kubenswrapper[4949]: I0216 11:49:20.235678 4949 scope.go:117] "RemoveContainer" containerID="739a990adef2b23deb9d2adfe3c94bdae210ea27ced100a67c5d19e922815eb4" Feb 16 11:49:20 crc kubenswrapper[4949]: E0216 11:49:20.237843 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:49:30 crc kubenswrapper[4949]: E0216 11:49:30.240980 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:49:31 crc kubenswrapper[4949]: I0216 11:49:31.246115 4949 scope.go:117] "RemoveContainer" containerID="739a990adef2b23deb9d2adfe3c94bdae210ea27ced100a67c5d19e922815eb4" Feb 16 11:49:31 crc kubenswrapper[4949]: E0216 11:49:31.246783 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:49:31 crc kubenswrapper[4949]: E0216 11:49:31.247454 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:49:42 crc kubenswrapper[4949]: E0216 11:49:42.239663 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:49:44 crc kubenswrapper[4949]: E0216 11:49:44.239139 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:49:46 crc kubenswrapper[4949]: I0216 11:49:46.240496 4949 scope.go:117] "RemoveContainer" containerID="739a990adef2b23deb9d2adfe3c94bdae210ea27ced100a67c5d19e922815eb4" Feb 16 11:49:46 crc kubenswrapper[4949]: E0216 11:49:46.241096 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:49:55 crc kubenswrapper[4949]: E0216 11:49:55.242397 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:49:58 crc kubenswrapper[4949]: E0216 11:49:58.238072 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:49:59 crc kubenswrapper[4949]: I0216 11:49:59.236054 4949 scope.go:117] "RemoveContainer" containerID="739a990adef2b23deb9d2adfe3c94bdae210ea27ced100a67c5d19e922815eb4" Feb 16 11:49:59 crc kubenswrapper[4949]: E0216 11:49:59.236573 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:50:08 crc kubenswrapper[4949]: E0216 11:50:08.239057 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:50:12 crc kubenswrapper[4949]: E0216 11:50:12.237053 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:50:13 crc kubenswrapper[4949]: I0216 11:50:13.235608 4949 scope.go:117] "RemoveContainer" containerID="739a990adef2b23deb9d2adfe3c94bdae210ea27ced100a67c5d19e922815eb4" Feb 16 11:50:13 crc kubenswrapper[4949]: E0216 11:50:13.236667 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:50:23 crc kubenswrapper[4949]: E0216 11:50:23.239208 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:50:23 crc kubenswrapper[4949]: E0216 11:50:23.239258 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:50:25 
Feb 16 11:50:25 crc kubenswrapper[4949]: I0216 11:50:25.235748 4949 scope.go:117] "RemoveContainer" containerID="739a990adef2b23deb9d2adfe3c94bdae210ea27ced100a67c5d19e922815eb4"
Feb 16 11:50:25 crc kubenswrapper[4949]: E0216 11:50:25.236779 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b"
Feb 16 11:50:35 crc kubenswrapper[4949]: E0216 11:50:35.238656 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 11:50:37 crc kubenswrapper[4949]: E0216 11:50:37.238429 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 11:50:38 crc kubenswrapper[4949]: I0216 11:50:38.236292 4949 scope.go:117] "RemoveContainer" containerID="739a990adef2b23deb9d2adfe3c94bdae210ea27ced100a67c5d19e922815eb4"
Feb 16 11:50:39 crc kubenswrapper[4949]: I0216 11:50:39.466574 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" event={"ID":"39ca5ab7-457c-4404-a3eb-f6acce74843b","Type":"ContainerStarted","Data":"b2547e29673da8edc36fedc7e79471a698f99d8dbbbcf9abc8645e00ea426f97"}
Feb 16 11:50:48 crc kubenswrapper[4949]: E0216 11:50:48.240063 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 11:50:51 crc kubenswrapper[4949]: E0216 11:50:51.260048 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 11:51:02 crc kubenswrapper[4949]: E0216 11:51:02.239240 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 11:51:03 crc kubenswrapper[4949]: E0216 11:51:03.237748 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0"
podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:51:17 crc kubenswrapper[4949]: I0216 11:51:17.238028 4949 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 16 11:51:17 crc kubenswrapper[4949]: E0216 11:51:17.356830 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 16 11:51:17 crc kubenswrapper[4949]: E0216 11:51:17.356894 4949 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 16 11:51:17 crc kubenswrapper[4949]: E0216 11:51:17.357042 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n674h5dh7bh65bhcch65chc4h547h5d4h5c7h5dch5c8h74hb9h5f4hd8h79h7h59bh559h56bh9bhbch67bh68bh575h5cbh658h5bch7bhcch5d9q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8k7p7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 
/var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(c69d7379-6f2b-45ae-8972-71e223a337a8): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 16 11:51:17 crc kubenswrapper[4949]: E0216 11:51:17.358227 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:51:17 crc kubenswrapper[4949]: E0216 11:51:17.370519 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 16 11:51:17 crc kubenswrapper[4949]: E0216 11:51:17.370563 4949 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 16 11:51:17 crc kubenswrapper[4949]: E0216 11:51:17.370700 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ksbml,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-5lgds_openstack(a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 16 11:51:17 crc kubenswrapper[4949]: E0216 11:51:17.372278 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:51:29 crc kubenswrapper[4949]: E0216 11:51:29.243162 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:51:32 crc kubenswrapper[4949]: E0216 11:51:32.238717 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:51:40 crc kubenswrapper[4949]: E0216 11:51:40.238970 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:51:43 crc kubenswrapper[4949]: E0216 11:51:43.238816 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:51:54 crc kubenswrapper[4949]: E0216 11:51:54.237817 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:51:56 crc kubenswrapper[4949]: E0216 11:51:56.238423 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:52:05 crc kubenswrapper[4949]: E0216 11:52:05.238066 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:52:09 crc kubenswrapper[4949]: E0216 11:52:09.240018 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:52:16 crc kubenswrapper[4949]: E0216 11:52:16.238138 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: 
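The ErrImagePull entries above finally expose why both pods have been in ImagePullBackOff throughout this log: the current-tested tag of each image was deleted or expired in quay.rdoproject.org, so every retry is doomed until the tag is restored or the pod spec points at a live tag. This can be confirmed from any host, sketched here assuming skopeo is installed; the image names are copied from the errors above:

    # Expect the same "Tag current-tested was deleted or has expired" error:
    skopeo inspect docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested
    # Tags that do still exist for the repository:
    skopeo list-tags docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine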
\"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:52:18 crc kubenswrapper[4949]: I0216 11:52:18.590456 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-5644j"] Feb 16 11:52:18 crc kubenswrapper[4949]: E0216 11:52:18.591649 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36140df8-0584-42f3-8dd2-72ae078e0b77" containerName="extract-content" Feb 16 11:52:18 crc kubenswrapper[4949]: I0216 11:52:18.591669 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="36140df8-0584-42f3-8dd2-72ae078e0b77" containerName="extract-content" Feb 16 11:52:18 crc kubenswrapper[4949]: E0216 11:52:18.591699 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36140df8-0584-42f3-8dd2-72ae078e0b77" containerName="registry-server" Feb 16 11:52:18 crc kubenswrapper[4949]: I0216 11:52:18.591708 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="36140df8-0584-42f3-8dd2-72ae078e0b77" containerName="registry-server" Feb 16 11:52:18 crc kubenswrapper[4949]: E0216 11:52:18.591719 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36140df8-0584-42f3-8dd2-72ae078e0b77" containerName="extract-utilities" Feb 16 11:52:18 crc kubenswrapper[4949]: I0216 11:52:18.591729 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="36140df8-0584-42f3-8dd2-72ae078e0b77" containerName="extract-utilities" Feb 16 11:52:18 crc kubenswrapper[4949]: I0216 11:52:18.592022 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="36140df8-0584-42f3-8dd2-72ae078e0b77" containerName="registry-server" Feb 16 11:52:18 crc kubenswrapper[4949]: I0216 11:52:18.594157 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-5644j" Feb 16 11:52:18 crc kubenswrapper[4949]: I0216 11:52:18.607064 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5644j"] Feb 16 11:52:18 crc kubenswrapper[4949]: I0216 11:52:18.705995 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lm5l4\" (UniqueName: \"kubernetes.io/projected/142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34-kube-api-access-lm5l4\") pod \"redhat-operators-5644j\" (UID: \"142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34\") " pod="openshift-marketplace/redhat-operators-5644j" Feb 16 11:52:18 crc kubenswrapper[4949]: I0216 11:52:18.706057 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34-utilities\") pod \"redhat-operators-5644j\" (UID: \"142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34\") " pod="openshift-marketplace/redhat-operators-5644j" Feb 16 11:52:18 crc kubenswrapper[4949]: I0216 11:52:18.706488 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34-catalog-content\") pod \"redhat-operators-5644j\" (UID: \"142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34\") " pod="openshift-marketplace/redhat-operators-5644j" Feb 16 11:52:18 crc kubenswrapper[4949]: I0216 11:52:18.809282 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lm5l4\" (UniqueName: \"kubernetes.io/projected/142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34-kube-api-access-lm5l4\") pod \"redhat-operators-5644j\" (UID: \"142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34\") " pod="openshift-marketplace/redhat-operators-5644j" Feb 16 11:52:18 crc kubenswrapper[4949]: I0216 11:52:18.809337 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34-utilities\") pod \"redhat-operators-5644j\" (UID: \"142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34\") " pod="openshift-marketplace/redhat-operators-5644j" Feb 16 11:52:18 crc kubenswrapper[4949]: I0216 11:52:18.809451 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34-catalog-content\") pod \"redhat-operators-5644j\" (UID: \"142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34\") " pod="openshift-marketplace/redhat-operators-5644j" Feb 16 11:52:18 crc kubenswrapper[4949]: I0216 11:52:18.809958 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34-utilities\") pod \"redhat-operators-5644j\" (UID: \"142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34\") " pod="openshift-marketplace/redhat-operators-5644j" Feb 16 11:52:18 crc kubenswrapper[4949]: I0216 11:52:18.810052 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34-catalog-content\") pod \"redhat-operators-5644j\" (UID: \"142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34\") " pod="openshift-marketplace/redhat-operators-5644j" Feb 16 11:52:18 crc kubenswrapper[4949]: I0216 11:52:18.833231 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-lm5l4\" (UniqueName: \"kubernetes.io/projected/142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34-kube-api-access-lm5l4\") pod \"redhat-operators-5644j\" (UID: \"142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34\") " pod="openshift-marketplace/redhat-operators-5644j" Feb 16 11:52:18 crc kubenswrapper[4949]: I0216 11:52:18.924469 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5644j" Feb 16 11:52:19 crc kubenswrapper[4949]: I0216 11:52:19.491920 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5644j"] Feb 16 11:52:19 crc kubenswrapper[4949]: I0216 11:52:19.660906 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5644j" event={"ID":"142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34","Type":"ContainerStarted","Data":"0b1c43ecf0e525f721c3a50d61374d86bd0a1d8d03c88e3e9677e03384c14ef6"} Feb 16 11:52:20 crc kubenswrapper[4949]: E0216 11:52:20.236937 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:52:20 crc kubenswrapper[4949]: I0216 11:52:20.671702 4949 generic.go:334] "Generic (PLEG): container finished" podID="142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34" containerID="deebd1da90570cce4f00b996ca9a5378acc9e63242b376c9fb96ffc0aed482e0" exitCode=0 Feb 16 11:52:20 crc kubenswrapper[4949]: I0216 11:52:20.671825 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5644j" event={"ID":"142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34","Type":"ContainerDied","Data":"deebd1da90570cce4f00b996ca9a5378acc9e63242b376c9fb96ffc0aed482e0"} Feb 16 11:52:21 crc kubenswrapper[4949]: I0216 11:52:21.683869 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5644j" event={"ID":"142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34","Type":"ContainerStarted","Data":"ab9d555dcb80631297c5c4ab15a54dfee68f3fbeed08d9449eb542c83fdbc8c6"} Feb 16 11:52:24 crc kubenswrapper[4949]: I0216 11:52:24.663048 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-tltsq"] Feb 16 11:52:24 crc kubenswrapper[4949]: I0216 11:52:24.667799 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-tltsq" Feb 16 11:52:24 crc kubenswrapper[4949]: I0216 11:52:24.679266 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tltsq"] Feb 16 11:52:24 crc kubenswrapper[4949]: I0216 11:52:24.773968 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w5zzh\" (UniqueName: \"kubernetes.io/projected/365b2e10-f522-48d9-a28e-0ab3b0c2a734-kube-api-access-w5zzh\") pod \"certified-operators-tltsq\" (UID: \"365b2e10-f522-48d9-a28e-0ab3b0c2a734\") " pod="openshift-marketplace/certified-operators-tltsq" Feb 16 11:52:24 crc kubenswrapper[4949]: I0216 11:52:24.774250 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/365b2e10-f522-48d9-a28e-0ab3b0c2a734-catalog-content\") pod \"certified-operators-tltsq\" (UID: \"365b2e10-f522-48d9-a28e-0ab3b0c2a734\") " pod="openshift-marketplace/certified-operators-tltsq" Feb 16 11:52:24 crc kubenswrapper[4949]: I0216 11:52:24.774543 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/365b2e10-f522-48d9-a28e-0ab3b0c2a734-utilities\") pod \"certified-operators-tltsq\" (UID: \"365b2e10-f522-48d9-a28e-0ab3b0c2a734\") " pod="openshift-marketplace/certified-operators-tltsq" Feb 16 11:52:24 crc kubenswrapper[4949]: I0216 11:52:24.876854 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/365b2e10-f522-48d9-a28e-0ab3b0c2a734-catalog-content\") pod \"certified-operators-tltsq\" (UID: \"365b2e10-f522-48d9-a28e-0ab3b0c2a734\") " pod="openshift-marketplace/certified-operators-tltsq" Feb 16 11:52:24 crc kubenswrapper[4949]: I0216 11:52:24.877057 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/365b2e10-f522-48d9-a28e-0ab3b0c2a734-utilities\") pod \"certified-operators-tltsq\" (UID: \"365b2e10-f522-48d9-a28e-0ab3b0c2a734\") " pod="openshift-marketplace/certified-operators-tltsq" Feb 16 11:52:24 crc kubenswrapper[4949]: I0216 11:52:24.877200 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w5zzh\" (UniqueName: \"kubernetes.io/projected/365b2e10-f522-48d9-a28e-0ab3b0c2a734-kube-api-access-w5zzh\") pod \"certified-operators-tltsq\" (UID: \"365b2e10-f522-48d9-a28e-0ab3b0c2a734\") " pod="openshift-marketplace/certified-operators-tltsq" Feb 16 11:52:24 crc kubenswrapper[4949]: I0216 11:52:24.877854 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/365b2e10-f522-48d9-a28e-0ab3b0c2a734-catalog-content\") pod \"certified-operators-tltsq\" (UID: \"365b2e10-f522-48d9-a28e-0ab3b0c2a734\") " pod="openshift-marketplace/certified-operators-tltsq" Feb 16 11:52:24 crc kubenswrapper[4949]: I0216 11:52:24.877990 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/365b2e10-f522-48d9-a28e-0ab3b0c2a734-utilities\") pod \"certified-operators-tltsq\" (UID: \"365b2e10-f522-48d9-a28e-0ab3b0c2a734\") " pod="openshift-marketplace/certified-operators-tltsq" Feb 16 11:52:24 crc kubenswrapper[4949]: I0216 11:52:24.899065 4949 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-w5zzh\" (UniqueName: \"kubernetes.io/projected/365b2e10-f522-48d9-a28e-0ab3b0c2a734-kube-api-access-w5zzh\") pod \"certified-operators-tltsq\" (UID: \"365b2e10-f522-48d9-a28e-0ab3b0c2a734\") " pod="openshift-marketplace/certified-operators-tltsq" Feb 16 11:52:25 crc kubenswrapper[4949]: I0216 11:52:25.003529 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tltsq" Feb 16 11:52:25 crc kubenswrapper[4949]: W0216 11:52:25.698250 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod365b2e10_f522_48d9_a28e_0ab3b0c2a734.slice/crio-d4eadfbfcbe06f12b2ff7723b5918fd4773533103b733d4cc93cadef026acd56 WatchSource:0}: Error finding container d4eadfbfcbe06f12b2ff7723b5918fd4773533103b733d4cc93cadef026acd56: Status 404 returned error can't find the container with id d4eadfbfcbe06f12b2ff7723b5918fd4773533103b733d4cc93cadef026acd56 Feb 16 11:52:25 crc kubenswrapper[4949]: I0216 11:52:25.699668 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tltsq"] Feb 16 11:52:25 crc kubenswrapper[4949]: I0216 11:52:25.725361 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tltsq" event={"ID":"365b2e10-f522-48d9-a28e-0ab3b0c2a734","Type":"ContainerStarted","Data":"d4eadfbfcbe06f12b2ff7723b5918fd4773533103b733d4cc93cadef026acd56"} Feb 16 11:52:26 crc kubenswrapper[4949]: I0216 11:52:26.737075 4949 generic.go:334] "Generic (PLEG): container finished" podID="142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34" containerID="ab9d555dcb80631297c5c4ab15a54dfee68f3fbeed08d9449eb542c83fdbc8c6" exitCode=0 Feb 16 11:52:26 crc kubenswrapper[4949]: I0216 11:52:26.737140 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5644j" event={"ID":"142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34","Type":"ContainerDied","Data":"ab9d555dcb80631297c5c4ab15a54dfee68f3fbeed08d9449eb542c83fdbc8c6"} Feb 16 11:52:26 crc kubenswrapper[4949]: I0216 11:52:26.741864 4949 generic.go:334] "Generic (PLEG): container finished" podID="365b2e10-f522-48d9-a28e-0ab3b0c2a734" containerID="f6d4b97d0f570c0bc0a335a4cb22573d5b911145462d1d006afe7bcf010087e1" exitCode=0 Feb 16 11:52:26 crc kubenswrapper[4949]: I0216 11:52:26.741916 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tltsq" event={"ID":"365b2e10-f522-48d9-a28e-0ab3b0c2a734","Type":"ContainerDied","Data":"f6d4b97d0f570c0bc0a335a4cb22573d5b911145462d1d006afe7bcf010087e1"} Feb 16 11:52:27 crc kubenswrapper[4949]: I0216 11:52:27.759386 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tltsq" event={"ID":"365b2e10-f522-48d9-a28e-0ab3b0c2a734","Type":"ContainerStarted","Data":"07491bcd6b174226fd2a400109dd78ef20af14400f2bacbe353cefd91bee5ee1"} Feb 16 11:52:27 crc kubenswrapper[4949]: I0216 11:52:27.766163 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5644j" event={"ID":"142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34","Type":"ContainerStarted","Data":"10cae5b450d91d64fd1878604094e0b24d0229f19923d6cfeea9cf7e8b28300c"} Feb 16 11:52:27 crc kubenswrapper[4949]: I0216 11:52:27.808315 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-5644j" podStartSLOduration=3.361595924 
podStartE2EDuration="9.808294185s" podCreationTimestamp="2026-02-16 11:52:18 +0000 UTC" firstStartedPulling="2026-02-16 11:52:20.674204652 +0000 UTC m=+2730.303538817" lastFinishedPulling="2026-02-16 11:52:27.120902913 +0000 UTC m=+2736.750237078" observedRunningTime="2026-02-16 11:52:27.80287109 +0000 UTC m=+2737.432205255" watchObservedRunningTime="2026-02-16 11:52:27.808294185 +0000 UTC m=+2737.437628360" Feb 16 11:52:28 crc kubenswrapper[4949]: E0216 11:52:28.237844 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:52:28 crc kubenswrapper[4949]: I0216 11:52:28.925167 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-5644j" Feb 16 11:52:28 crc kubenswrapper[4949]: I0216 11:52:28.926698 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-5644j" Feb 16 11:52:30 crc kubenswrapper[4949]: I0216 11:52:30.040778 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-5644j" podUID="142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34" containerName="registry-server" probeResult="failure" output=< Feb 16 11:52:30 crc kubenswrapper[4949]: timeout: failed to connect service ":50051" within 1s Feb 16 11:52:30 crc kubenswrapper[4949]: > Feb 16 11:52:30 crc kubenswrapper[4949]: I0216 11:52:30.815133 4949 generic.go:334] "Generic (PLEG): container finished" podID="365b2e10-f522-48d9-a28e-0ab3b0c2a734" containerID="07491bcd6b174226fd2a400109dd78ef20af14400f2bacbe353cefd91bee5ee1" exitCode=0 Feb 16 11:52:30 crc kubenswrapper[4949]: I0216 11:52:30.815207 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tltsq" event={"ID":"365b2e10-f522-48d9-a28e-0ab3b0c2a734","Type":"ContainerDied","Data":"07491bcd6b174226fd2a400109dd78ef20af14400f2bacbe353cefd91bee5ee1"} Feb 16 11:52:31 crc kubenswrapper[4949]: I0216 11:52:31.834134 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tltsq" event={"ID":"365b2e10-f522-48d9-a28e-0ab3b0c2a734","Type":"ContainerStarted","Data":"8d8a0061f61d7fe209e8e0c914cdd3d0f84f8d7e54922a01d5b150b8cc519499"} Feb 16 11:52:31 crc kubenswrapper[4949]: I0216 11:52:31.855631 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-tltsq" podStartSLOduration=3.362112627 podStartE2EDuration="7.855616603s" podCreationTimestamp="2026-02-16 11:52:24 +0000 UTC" firstStartedPulling="2026-02-16 11:52:26.744058666 +0000 UTC m=+2736.373392821" lastFinishedPulling="2026-02-16 11:52:31.237562632 +0000 UTC m=+2740.866896797" observedRunningTime="2026-02-16 11:52:31.854773999 +0000 UTC m=+2741.484108164" watchObservedRunningTime="2026-02-16 11:52:31.855616603 +0000 UTC m=+2741.484950768" Feb 16 11:52:33 crc kubenswrapper[4949]: E0216 11:52:33.237988 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 
11:52:35 crc kubenswrapper[4949]: I0216 11:52:35.004160 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-tltsq" Feb 16 11:52:35 crc kubenswrapper[4949]: I0216 11:52:35.004535 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-tltsq" Feb 16 11:52:35 crc kubenswrapper[4949]: I0216 11:52:35.056710 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-tltsq" Feb 16 11:52:39 crc kubenswrapper[4949]: I0216 11:52:39.982889 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-5644j" podUID="142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34" containerName="registry-server" probeResult="failure" output=< Feb 16 11:52:39 crc kubenswrapper[4949]: timeout: failed to connect service ":50051" within 1s Feb 16 11:52:39 crc kubenswrapper[4949]: > Feb 16 11:52:40 crc kubenswrapper[4949]: I0216 11:52:40.930857 4949 generic.go:334] "Generic (PLEG): container finished" podID="5cd889af-3e25-4f40-bebf-f1861b7549ed" containerID="c281e92f7db7c4d1777a52e5e19367494fe19b25df7fb896a02846fe57f35652" exitCode=2 Feb 16 11:52:40 crc kubenswrapper[4949]: I0216 11:52:40.930897 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-q89fn" event={"ID":"5cd889af-3e25-4f40-bebf-f1861b7549ed","Type":"ContainerDied","Data":"c281e92f7db7c4d1777a52e5e19367494fe19b25df7fb896a02846fe57f35652"} Feb 16 11:52:42 crc kubenswrapper[4949]: I0216 11:52:42.507741 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-q89fn" Feb 16 11:52:42 crc kubenswrapper[4949]: I0216 11:52:42.633886 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/5cd889af-3e25-4f40-bebf-f1861b7549ed-ssh-key-openstack-edpm-ipam\") pod \"5cd889af-3e25-4f40-bebf-f1861b7549ed\" (UID: \"5cd889af-3e25-4f40-bebf-f1861b7549ed\") " Feb 16 11:52:42 crc kubenswrapper[4949]: I0216 11:52:42.634192 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5cd889af-3e25-4f40-bebf-f1861b7549ed-inventory\") pod \"5cd889af-3e25-4f40-bebf-f1861b7549ed\" (UID: \"5cd889af-3e25-4f40-bebf-f1861b7549ed\") " Feb 16 11:52:42 crc kubenswrapper[4949]: I0216 11:52:42.634279 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jl2pq\" (UniqueName: \"kubernetes.io/projected/5cd889af-3e25-4f40-bebf-f1861b7549ed-kube-api-access-jl2pq\") pod \"5cd889af-3e25-4f40-bebf-f1861b7549ed\" (UID: \"5cd889af-3e25-4f40-bebf-f1861b7549ed\") " Feb 16 11:52:42 crc kubenswrapper[4949]: I0216 11:52:42.640584 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5cd889af-3e25-4f40-bebf-f1861b7549ed-kube-api-access-jl2pq" (OuterVolumeSpecName: "kube-api-access-jl2pq") pod "5cd889af-3e25-4f40-bebf-f1861b7549ed" (UID: "5cd889af-3e25-4f40-bebf-f1861b7549ed"). InnerVolumeSpecName "kube-api-access-jl2pq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:52:42 crc kubenswrapper[4949]: I0216 11:52:42.666506 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5cd889af-3e25-4f40-bebf-f1861b7549ed-inventory" (OuterVolumeSpecName: "inventory") pod "5cd889af-3e25-4f40-bebf-f1861b7549ed" (UID: "5cd889af-3e25-4f40-bebf-f1861b7549ed"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:52:42 crc kubenswrapper[4949]: I0216 11:52:42.674651 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5cd889af-3e25-4f40-bebf-f1861b7549ed-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "5cd889af-3e25-4f40-bebf-f1861b7549ed" (UID: "5cd889af-3e25-4f40-bebf-f1861b7549ed"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:52:42 crc kubenswrapper[4949]: I0216 11:52:42.736785 4949 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5cd889af-3e25-4f40-bebf-f1861b7549ed-inventory\") on node \"crc\" DevicePath \"\"" Feb 16 11:52:42 crc kubenswrapper[4949]: I0216 11:52:42.736821 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jl2pq\" (UniqueName: \"kubernetes.io/projected/5cd889af-3e25-4f40-bebf-f1861b7549ed-kube-api-access-jl2pq\") on node \"crc\" DevicePath \"\"" Feb 16 11:52:42 crc kubenswrapper[4949]: I0216 11:52:42.736839 4949 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/5cd889af-3e25-4f40-bebf-f1861b7549ed-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Feb 16 11:52:42 crc kubenswrapper[4949]: I0216 11:52:42.950059 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-q89fn" event={"ID":"5cd889af-3e25-4f40-bebf-f1861b7549ed","Type":"ContainerDied","Data":"ec51c0e907b2330c38fd4df00da8a964f1fa446f727d5b5a14d5cc9a87556c15"} Feb 16 11:52:42 crc kubenswrapper[4949]: I0216 11:52:42.950538 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ec51c0e907b2330c38fd4df00da8a964f1fa446f727d5b5a14d5cc9a87556c15" Feb 16 11:52:42 crc kubenswrapper[4949]: I0216 11:52:42.950132 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-q89fn" Feb 16 11:52:43 crc kubenswrapper[4949]: E0216 11:52:43.239078 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:52:44 crc kubenswrapper[4949]: E0216 11:52:44.237152 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:52:45 crc kubenswrapper[4949]: I0216 11:52:45.063957 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-tltsq" Feb 16 11:52:45 crc kubenswrapper[4949]: I0216 11:52:45.125894 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tltsq"] Feb 16 11:52:45 crc kubenswrapper[4949]: I0216 11:52:45.981852 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-tltsq" podUID="365b2e10-f522-48d9-a28e-0ab3b0c2a734" containerName="registry-server" containerID="cri-o://8d8a0061f61d7fe209e8e0c914cdd3d0f84f8d7e54922a01d5b150b8cc519499" gracePeriod=2 Feb 16 11:52:46 crc kubenswrapper[4949]: I0216 11:52:46.635564 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tltsq" Feb 16 11:52:46 crc kubenswrapper[4949]: I0216 11:52:46.640157 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w5zzh\" (UniqueName: \"kubernetes.io/projected/365b2e10-f522-48d9-a28e-0ab3b0c2a734-kube-api-access-w5zzh\") pod \"365b2e10-f522-48d9-a28e-0ab3b0c2a734\" (UID: \"365b2e10-f522-48d9-a28e-0ab3b0c2a734\") " Feb 16 11:52:46 crc kubenswrapper[4949]: I0216 11:52:46.640380 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/365b2e10-f522-48d9-a28e-0ab3b0c2a734-catalog-content\") pod \"365b2e10-f522-48d9-a28e-0ab3b0c2a734\" (UID: \"365b2e10-f522-48d9-a28e-0ab3b0c2a734\") " Feb 16 11:52:46 crc kubenswrapper[4949]: I0216 11:52:46.640401 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/365b2e10-f522-48d9-a28e-0ab3b0c2a734-utilities\") pod \"365b2e10-f522-48d9-a28e-0ab3b0c2a734\" (UID: \"365b2e10-f522-48d9-a28e-0ab3b0c2a734\") " Feb 16 11:52:46 crc kubenswrapper[4949]: I0216 11:52:46.641849 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/365b2e10-f522-48d9-a28e-0ab3b0c2a734-utilities" (OuterVolumeSpecName: "utilities") pod "365b2e10-f522-48d9-a28e-0ab3b0c2a734" (UID: "365b2e10-f522-48d9-a28e-0ab3b0c2a734"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:52:46 crc kubenswrapper[4949]: I0216 11:52:46.657100 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/365b2e10-f522-48d9-a28e-0ab3b0c2a734-kube-api-access-w5zzh" (OuterVolumeSpecName: "kube-api-access-w5zzh") pod "365b2e10-f522-48d9-a28e-0ab3b0c2a734" (UID: "365b2e10-f522-48d9-a28e-0ab3b0c2a734"). InnerVolumeSpecName "kube-api-access-w5zzh". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:52:46 crc kubenswrapper[4949]: I0216 11:52:46.709702 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/365b2e10-f522-48d9-a28e-0ab3b0c2a734-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "365b2e10-f522-48d9-a28e-0ab3b0c2a734" (UID: "365b2e10-f522-48d9-a28e-0ab3b0c2a734"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:52:46 crc kubenswrapper[4949]: I0216 11:52:46.743339 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w5zzh\" (UniqueName: \"kubernetes.io/projected/365b2e10-f522-48d9-a28e-0ab3b0c2a734-kube-api-access-w5zzh\") on node \"crc\" DevicePath \"\"" Feb 16 11:52:46 crc kubenswrapper[4949]: I0216 11:52:46.743379 4949 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/365b2e10-f522-48d9-a28e-0ab3b0c2a734-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 16 11:52:46 crc kubenswrapper[4949]: I0216 11:52:46.743390 4949 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/365b2e10-f522-48d9-a28e-0ab3b0c2a734-utilities\") on node \"crc\" DevicePath \"\"" Feb 16 11:52:47 crc kubenswrapper[4949]: I0216 11:52:47.008226 4949 generic.go:334] "Generic (PLEG): container finished" podID="365b2e10-f522-48d9-a28e-0ab3b0c2a734" containerID="8d8a0061f61d7fe209e8e0c914cdd3d0f84f8d7e54922a01d5b150b8cc519499" exitCode=0 Feb 16 11:52:47 crc kubenswrapper[4949]: I0216 11:52:47.008268 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tltsq" event={"ID":"365b2e10-f522-48d9-a28e-0ab3b0c2a734","Type":"ContainerDied","Data":"8d8a0061f61d7fe209e8e0c914cdd3d0f84f8d7e54922a01d5b150b8cc519499"} Feb 16 11:52:47 crc kubenswrapper[4949]: I0216 11:52:47.008391 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-tltsq" Feb 16 11:52:47 crc kubenswrapper[4949]: I0216 11:52:47.008440 4949 scope.go:117] "RemoveContainer" containerID="8d8a0061f61d7fe209e8e0c914cdd3d0f84f8d7e54922a01d5b150b8cc519499" Feb 16 11:52:47 crc kubenswrapper[4949]: I0216 11:52:47.008449 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tltsq" event={"ID":"365b2e10-f522-48d9-a28e-0ab3b0c2a734","Type":"ContainerDied","Data":"d4eadfbfcbe06f12b2ff7723b5918fd4773533103b733d4cc93cadef026acd56"} Feb 16 11:52:47 crc kubenswrapper[4949]: I0216 11:52:47.047304 4949 scope.go:117] "RemoveContainer" containerID="07491bcd6b174226fd2a400109dd78ef20af14400f2bacbe353cefd91bee5ee1" Feb 16 11:52:47 crc kubenswrapper[4949]: I0216 11:52:47.071323 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tltsq"] Feb 16 11:52:47 crc kubenswrapper[4949]: I0216 11:52:47.077340 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-tltsq"] Feb 16 11:52:47 crc kubenswrapper[4949]: I0216 11:52:47.102278 4949 scope.go:117] "RemoveContainer" containerID="f6d4b97d0f570c0bc0a335a4cb22573d5b911145462d1d006afe7bcf010087e1" Feb 16 11:52:47 crc kubenswrapper[4949]: I0216 11:52:47.157546 4949 scope.go:117] "RemoveContainer" containerID="8d8a0061f61d7fe209e8e0c914cdd3d0f84f8d7e54922a01d5b150b8cc519499" Feb 16 11:52:47 crc kubenswrapper[4949]: E0216 11:52:47.158327 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8d8a0061f61d7fe209e8e0c914cdd3d0f84f8d7e54922a01d5b150b8cc519499\": container with ID starting with 8d8a0061f61d7fe209e8e0c914cdd3d0f84f8d7e54922a01d5b150b8cc519499 not found: ID does not exist" containerID="8d8a0061f61d7fe209e8e0c914cdd3d0f84f8d7e54922a01d5b150b8cc519499" Feb 16 11:52:47 crc kubenswrapper[4949]: I0216 11:52:47.158368 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8d8a0061f61d7fe209e8e0c914cdd3d0f84f8d7e54922a01d5b150b8cc519499"} err="failed to get container status \"8d8a0061f61d7fe209e8e0c914cdd3d0f84f8d7e54922a01d5b150b8cc519499\": rpc error: code = NotFound desc = could not find container \"8d8a0061f61d7fe209e8e0c914cdd3d0f84f8d7e54922a01d5b150b8cc519499\": container with ID starting with 8d8a0061f61d7fe209e8e0c914cdd3d0f84f8d7e54922a01d5b150b8cc519499 not found: ID does not exist" Feb 16 11:52:47 crc kubenswrapper[4949]: I0216 11:52:47.158396 4949 scope.go:117] "RemoveContainer" containerID="07491bcd6b174226fd2a400109dd78ef20af14400f2bacbe353cefd91bee5ee1" Feb 16 11:52:47 crc kubenswrapper[4949]: E0216 11:52:47.158807 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"07491bcd6b174226fd2a400109dd78ef20af14400f2bacbe353cefd91bee5ee1\": container with ID starting with 07491bcd6b174226fd2a400109dd78ef20af14400f2bacbe353cefd91bee5ee1 not found: ID does not exist" containerID="07491bcd6b174226fd2a400109dd78ef20af14400f2bacbe353cefd91bee5ee1" Feb 16 11:52:47 crc kubenswrapper[4949]: I0216 11:52:47.158841 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"07491bcd6b174226fd2a400109dd78ef20af14400f2bacbe353cefd91bee5ee1"} err="failed to get container status \"07491bcd6b174226fd2a400109dd78ef20af14400f2bacbe353cefd91bee5ee1\": rpc error: code = NotFound desc = could not find 
container \"07491bcd6b174226fd2a400109dd78ef20af14400f2bacbe353cefd91bee5ee1\": container with ID starting with 07491bcd6b174226fd2a400109dd78ef20af14400f2bacbe353cefd91bee5ee1 not found: ID does not exist" Feb 16 11:52:47 crc kubenswrapper[4949]: I0216 11:52:47.158867 4949 scope.go:117] "RemoveContainer" containerID="f6d4b97d0f570c0bc0a335a4cb22573d5b911145462d1d006afe7bcf010087e1" Feb 16 11:52:47 crc kubenswrapper[4949]: E0216 11:52:47.159104 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f6d4b97d0f570c0bc0a335a4cb22573d5b911145462d1d006afe7bcf010087e1\": container with ID starting with f6d4b97d0f570c0bc0a335a4cb22573d5b911145462d1d006afe7bcf010087e1 not found: ID does not exist" containerID="f6d4b97d0f570c0bc0a335a4cb22573d5b911145462d1d006afe7bcf010087e1" Feb 16 11:52:47 crc kubenswrapper[4949]: I0216 11:52:47.159119 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f6d4b97d0f570c0bc0a335a4cb22573d5b911145462d1d006afe7bcf010087e1"} err="failed to get container status \"f6d4b97d0f570c0bc0a335a4cb22573d5b911145462d1d006afe7bcf010087e1\": rpc error: code = NotFound desc = could not find container \"f6d4b97d0f570c0bc0a335a4cb22573d5b911145462d1d006afe7bcf010087e1\": container with ID starting with f6d4b97d0f570c0bc0a335a4cb22573d5b911145462d1d006afe7bcf010087e1 not found: ID does not exist" Feb 16 11:52:47 crc kubenswrapper[4949]: I0216 11:52:47.258210 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="365b2e10-f522-48d9-a28e-0ab3b0c2a734" path="/var/lib/kubelet/pods/365b2e10-f522-48d9-a28e-0ab3b0c2a734/volumes" Feb 16 11:52:48 crc kubenswrapper[4949]: I0216 11:52:48.989028 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-5644j" Feb 16 11:52:49 crc kubenswrapper[4949]: I0216 11:52:49.085102 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-5644j" Feb 16 11:52:49 crc kubenswrapper[4949]: I0216 11:52:49.709124 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5644j"] Feb 16 11:52:50 crc kubenswrapper[4949]: I0216 11:52:50.044254 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-5644j" podUID="142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34" containerName="registry-server" containerID="cri-o://10cae5b450d91d64fd1878604094e0b24d0229f19923d6cfeea9cf7e8b28300c" gracePeriod=2 Feb 16 11:52:50 crc kubenswrapper[4949]: I0216 11:52:50.678706 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-5644j" Feb 16 11:52:50 crc kubenswrapper[4949]: I0216 11:52:50.754317 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34-catalog-content\") pod \"142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34\" (UID: \"142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34\") " Feb 16 11:52:50 crc kubenswrapper[4949]: I0216 11:52:50.754612 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34-utilities\") pod \"142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34\" (UID: \"142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34\") " Feb 16 11:52:50 crc kubenswrapper[4949]: I0216 11:52:50.754674 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lm5l4\" (UniqueName: \"kubernetes.io/projected/142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34-kube-api-access-lm5l4\") pod \"142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34\" (UID: \"142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34\") " Feb 16 11:52:50 crc kubenswrapper[4949]: I0216 11:52:50.755617 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34-utilities" (OuterVolumeSpecName: "utilities") pod "142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34" (UID: "142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:52:50 crc kubenswrapper[4949]: I0216 11:52:50.761709 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34-kube-api-access-lm5l4" (OuterVolumeSpecName: "kube-api-access-lm5l4") pod "142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34" (UID: "142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34"). InnerVolumeSpecName "kube-api-access-lm5l4". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:52:50 crc kubenswrapper[4949]: I0216 11:52:50.857353 4949 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34-utilities\") on node \"crc\" DevicePath \"\"" Feb 16 11:52:50 crc kubenswrapper[4949]: I0216 11:52:50.857397 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lm5l4\" (UniqueName: \"kubernetes.io/projected/142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34-kube-api-access-lm5l4\") on node \"crc\" DevicePath \"\"" Feb 16 11:52:50 crc kubenswrapper[4949]: I0216 11:52:50.896752 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34" (UID: "142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:52:50 crc kubenswrapper[4949]: I0216 11:52:50.958969 4949 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 16 11:52:51 crc kubenswrapper[4949]: I0216 11:52:51.056905 4949 generic.go:334] "Generic (PLEG): container finished" podID="142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34" containerID="10cae5b450d91d64fd1878604094e0b24d0229f19923d6cfeea9cf7e8b28300c" exitCode=0 Feb 16 11:52:51 crc kubenswrapper[4949]: I0216 11:52:51.056946 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5644j" event={"ID":"142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34","Type":"ContainerDied","Data":"10cae5b450d91d64fd1878604094e0b24d0229f19923d6cfeea9cf7e8b28300c"} Feb 16 11:52:51 crc kubenswrapper[4949]: I0216 11:52:51.056973 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5644j" Feb 16 11:52:51 crc kubenswrapper[4949]: I0216 11:52:51.056989 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5644j" event={"ID":"142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34","Type":"ContainerDied","Data":"0b1c43ecf0e525f721c3a50d61374d86bd0a1d8d03c88e3e9677e03384c14ef6"} Feb 16 11:52:51 crc kubenswrapper[4949]: I0216 11:52:51.057008 4949 scope.go:117] "RemoveContainer" containerID="10cae5b450d91d64fd1878604094e0b24d0229f19923d6cfeea9cf7e8b28300c" Feb 16 11:52:51 crc kubenswrapper[4949]: I0216 11:52:51.095925 4949 scope.go:117] "RemoveContainer" containerID="ab9d555dcb80631297c5c4ab15a54dfee68f3fbeed08d9449eb542c83fdbc8c6" Feb 16 11:52:51 crc kubenswrapper[4949]: I0216 11:52:51.101953 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5644j"] Feb 16 11:52:51 crc kubenswrapper[4949]: I0216 11:52:51.115914 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-5644j"] Feb 16 11:52:51 crc kubenswrapper[4949]: I0216 11:52:51.136483 4949 scope.go:117] "RemoveContainer" containerID="deebd1da90570cce4f00b996ca9a5378acc9e63242b376c9fb96ffc0aed482e0" Feb 16 11:52:51 crc kubenswrapper[4949]: I0216 11:52:51.188119 4949 scope.go:117] "RemoveContainer" containerID="10cae5b450d91d64fd1878604094e0b24d0229f19923d6cfeea9cf7e8b28300c" Feb 16 11:52:51 crc kubenswrapper[4949]: E0216 11:52:51.188797 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"10cae5b450d91d64fd1878604094e0b24d0229f19923d6cfeea9cf7e8b28300c\": container with ID starting with 10cae5b450d91d64fd1878604094e0b24d0229f19923d6cfeea9cf7e8b28300c not found: ID does not exist" containerID="10cae5b450d91d64fd1878604094e0b24d0229f19923d6cfeea9cf7e8b28300c" Feb 16 11:52:51 crc kubenswrapper[4949]: I0216 11:52:51.188829 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"10cae5b450d91d64fd1878604094e0b24d0229f19923d6cfeea9cf7e8b28300c"} err="failed to get container status \"10cae5b450d91d64fd1878604094e0b24d0229f19923d6cfeea9cf7e8b28300c\": rpc error: code = NotFound desc = could not find container \"10cae5b450d91d64fd1878604094e0b24d0229f19923d6cfeea9cf7e8b28300c\": container with ID starting with 10cae5b450d91d64fd1878604094e0b24d0229f19923d6cfeea9cf7e8b28300c not found: ID does not exist" Feb 16 11:52:51 crc 
kubenswrapper[4949]: I0216 11:52:51.188851 4949 scope.go:117] "RemoveContainer" containerID="ab9d555dcb80631297c5c4ab15a54dfee68f3fbeed08d9449eb542c83fdbc8c6" Feb 16 11:52:51 crc kubenswrapper[4949]: E0216 11:52:51.189388 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ab9d555dcb80631297c5c4ab15a54dfee68f3fbeed08d9449eb542c83fdbc8c6\": container with ID starting with ab9d555dcb80631297c5c4ab15a54dfee68f3fbeed08d9449eb542c83fdbc8c6 not found: ID does not exist" containerID="ab9d555dcb80631297c5c4ab15a54dfee68f3fbeed08d9449eb542c83fdbc8c6" Feb 16 11:52:51 crc kubenswrapper[4949]: I0216 11:52:51.189426 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ab9d555dcb80631297c5c4ab15a54dfee68f3fbeed08d9449eb542c83fdbc8c6"} err="failed to get container status \"ab9d555dcb80631297c5c4ab15a54dfee68f3fbeed08d9449eb542c83fdbc8c6\": rpc error: code = NotFound desc = could not find container \"ab9d555dcb80631297c5c4ab15a54dfee68f3fbeed08d9449eb542c83fdbc8c6\": container with ID starting with ab9d555dcb80631297c5c4ab15a54dfee68f3fbeed08d9449eb542c83fdbc8c6 not found: ID does not exist" Feb 16 11:52:51 crc kubenswrapper[4949]: I0216 11:52:51.189468 4949 scope.go:117] "RemoveContainer" containerID="deebd1da90570cce4f00b996ca9a5378acc9e63242b376c9fb96ffc0aed482e0" Feb 16 11:52:51 crc kubenswrapper[4949]: E0216 11:52:51.189782 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"deebd1da90570cce4f00b996ca9a5378acc9e63242b376c9fb96ffc0aed482e0\": container with ID starting with deebd1da90570cce4f00b996ca9a5378acc9e63242b376c9fb96ffc0aed482e0 not found: ID does not exist" containerID="deebd1da90570cce4f00b996ca9a5378acc9e63242b376c9fb96ffc0aed482e0" Feb 16 11:52:51 crc kubenswrapper[4949]: I0216 11:52:51.189849 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"deebd1da90570cce4f00b996ca9a5378acc9e63242b376c9fb96ffc0aed482e0"} err="failed to get container status \"deebd1da90570cce4f00b996ca9a5378acc9e63242b376c9fb96ffc0aed482e0\": rpc error: code = NotFound desc = could not find container \"deebd1da90570cce4f00b996ca9a5378acc9e63242b376c9fb96ffc0aed482e0\": container with ID starting with deebd1da90570cce4f00b996ca9a5378acc9e63242b376c9fb96ffc0aed482e0 not found: ID does not exist" Feb 16 11:52:51 crc kubenswrapper[4949]: I0216 11:52:51.252550 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34" path="/var/lib/kubelet/pods/142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34/volumes" Feb 16 11:52:57 crc kubenswrapper[4949]: E0216 11:52:57.239641 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:52:57 crc kubenswrapper[4949]: I0216 11:52:57.512082 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-z7dbc"] Feb 16 11:52:57 crc kubenswrapper[4949]: E0216 11:52:57.512919 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5cd889af-3e25-4f40-bebf-f1861b7549ed" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Feb 16 11:52:57 crc 
kubenswrapper[4949]: I0216 11:52:57.512946 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="5cd889af-3e25-4f40-bebf-f1861b7549ed" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Feb 16 11:52:57 crc kubenswrapper[4949]: E0216 11:52:57.512963 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="365b2e10-f522-48d9-a28e-0ab3b0c2a734" containerName="extract-utilities" Feb 16 11:52:57 crc kubenswrapper[4949]: I0216 11:52:57.512971 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="365b2e10-f522-48d9-a28e-0ab3b0c2a734" containerName="extract-utilities" Feb 16 11:52:57 crc kubenswrapper[4949]: E0216 11:52:57.512991 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34" containerName="extract-utilities" Feb 16 11:52:57 crc kubenswrapper[4949]: I0216 11:52:57.512998 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34" containerName="extract-utilities" Feb 16 11:52:57 crc kubenswrapper[4949]: E0216 11:52:57.513024 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="365b2e10-f522-48d9-a28e-0ab3b0c2a734" containerName="extract-content" Feb 16 11:52:57 crc kubenswrapper[4949]: I0216 11:52:57.513031 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="365b2e10-f522-48d9-a28e-0ab3b0c2a734" containerName="extract-content" Feb 16 11:52:57 crc kubenswrapper[4949]: E0216 11:52:57.513048 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34" containerName="extract-content" Feb 16 11:52:57 crc kubenswrapper[4949]: I0216 11:52:57.513055 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34" containerName="extract-content" Feb 16 11:52:57 crc kubenswrapper[4949]: E0216 11:52:57.513067 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34" containerName="registry-server" Feb 16 11:52:57 crc kubenswrapper[4949]: I0216 11:52:57.513076 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34" containerName="registry-server" Feb 16 11:52:57 crc kubenswrapper[4949]: E0216 11:52:57.513105 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="365b2e10-f522-48d9-a28e-0ab3b0c2a734" containerName="registry-server" Feb 16 11:52:57 crc kubenswrapper[4949]: I0216 11:52:57.513112 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="365b2e10-f522-48d9-a28e-0ab3b0c2a734" containerName="registry-server" Feb 16 11:52:57 crc kubenswrapper[4949]: I0216 11:52:57.513403 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="5cd889af-3e25-4f40-bebf-f1861b7549ed" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Feb 16 11:52:57 crc kubenswrapper[4949]: I0216 11:52:57.513430 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="365b2e10-f522-48d9-a28e-0ab3b0c2a734" containerName="registry-server" Feb 16 11:52:57 crc kubenswrapper[4949]: I0216 11:52:57.513445 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="142fa5e5-7ec3-44a4-82f4-6fc8c38d2b34" containerName="registry-server" Feb 16 11:52:57 crc kubenswrapper[4949]: I0216 11:52:57.517061 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-z7dbc" Feb 16 11:52:57 crc kubenswrapper[4949]: I0216 11:52:57.546986 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-z7dbc"] Feb 16 11:52:57 crc kubenswrapper[4949]: I0216 11:52:57.610331 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-42zs5\" (UniqueName: \"kubernetes.io/projected/7b6a888c-2896-4125-9947-9a31b5091820-kube-api-access-42zs5\") pod \"community-operators-z7dbc\" (UID: \"7b6a888c-2896-4125-9947-9a31b5091820\") " pod="openshift-marketplace/community-operators-z7dbc" Feb 16 11:52:57 crc kubenswrapper[4949]: I0216 11:52:57.610423 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7b6a888c-2896-4125-9947-9a31b5091820-catalog-content\") pod \"community-operators-z7dbc\" (UID: \"7b6a888c-2896-4125-9947-9a31b5091820\") " pod="openshift-marketplace/community-operators-z7dbc" Feb 16 11:52:57 crc kubenswrapper[4949]: I0216 11:52:57.610453 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7b6a888c-2896-4125-9947-9a31b5091820-utilities\") pod \"community-operators-z7dbc\" (UID: \"7b6a888c-2896-4125-9947-9a31b5091820\") " pod="openshift-marketplace/community-operators-z7dbc" Feb 16 11:52:57 crc kubenswrapper[4949]: I0216 11:52:57.712686 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-42zs5\" (UniqueName: \"kubernetes.io/projected/7b6a888c-2896-4125-9947-9a31b5091820-kube-api-access-42zs5\") pod \"community-operators-z7dbc\" (UID: \"7b6a888c-2896-4125-9947-9a31b5091820\") " pod="openshift-marketplace/community-operators-z7dbc" Feb 16 11:52:57 crc kubenswrapper[4949]: I0216 11:52:57.712767 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7b6a888c-2896-4125-9947-9a31b5091820-catalog-content\") pod \"community-operators-z7dbc\" (UID: \"7b6a888c-2896-4125-9947-9a31b5091820\") " pod="openshift-marketplace/community-operators-z7dbc" Feb 16 11:52:57 crc kubenswrapper[4949]: I0216 11:52:57.712794 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7b6a888c-2896-4125-9947-9a31b5091820-utilities\") pod \"community-operators-z7dbc\" (UID: \"7b6a888c-2896-4125-9947-9a31b5091820\") " pod="openshift-marketplace/community-operators-z7dbc" Feb 16 11:52:57 crc kubenswrapper[4949]: I0216 11:52:57.713354 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7b6a888c-2896-4125-9947-9a31b5091820-utilities\") pod \"community-operators-z7dbc\" (UID: \"7b6a888c-2896-4125-9947-9a31b5091820\") " pod="openshift-marketplace/community-operators-z7dbc" Feb 16 11:52:57 crc kubenswrapper[4949]: I0216 11:52:57.713466 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7b6a888c-2896-4125-9947-9a31b5091820-catalog-content\") pod \"community-operators-z7dbc\" (UID: \"7b6a888c-2896-4125-9947-9a31b5091820\") " pod="openshift-marketplace/community-operators-z7dbc" Feb 16 11:52:57 crc kubenswrapper[4949]: I0216 11:52:57.734512 4949 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-42zs5\" (UniqueName: \"kubernetes.io/projected/7b6a888c-2896-4125-9947-9a31b5091820-kube-api-access-42zs5\") pod \"community-operators-z7dbc\" (UID: \"7b6a888c-2896-4125-9947-9a31b5091820\") " pod="openshift-marketplace/community-operators-z7dbc" Feb 16 11:52:57 crc kubenswrapper[4949]: I0216 11:52:57.847442 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-z7dbc" Feb 16 11:52:58 crc kubenswrapper[4949]: E0216 11:52:58.238232 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:52:58 crc kubenswrapper[4949]: I0216 11:52:58.427574 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-z7dbc"] Feb 16 11:52:59 crc kubenswrapper[4949]: I0216 11:52:59.042266 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-p5g5b"] Feb 16 11:52:59 crc kubenswrapper[4949]: I0216 11:52:59.044711 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-p5g5b" Feb 16 11:52:59 crc kubenswrapper[4949]: I0216 11:52:59.051026 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Feb 16 11:52:59 crc kubenswrapper[4949]: I0216 11:52:59.051128 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Feb 16 11:52:59 crc kubenswrapper[4949]: I0216 11:52:59.054238 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Feb 16 11:52:59 crc kubenswrapper[4949]: I0216 11:52:59.054492 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-g89vn" Feb 16 11:52:59 crc kubenswrapper[4949]: I0216 11:52:59.062074 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-p5g5b"] Feb 16 11:52:59 crc kubenswrapper[4949]: I0216 11:52:59.153651 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/4583cb4e-c70d-4638-a948-75a5f5cfc593-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-p5g5b\" (UID: \"4583cb4e-c70d-4638-a948-75a5f5cfc593\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-p5g5b" Feb 16 11:52:59 crc kubenswrapper[4949]: I0216 11:52:59.153790 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4583cb4e-c70d-4638-a948-75a5f5cfc593-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-p5g5b\" (UID: \"4583cb4e-c70d-4638-a948-75a5f5cfc593\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-p5g5b" Feb 16 11:52:59 crc kubenswrapper[4949]: I0216 11:52:59.153851 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fdc4k\" (UniqueName: 
\"kubernetes.io/projected/4583cb4e-c70d-4638-a948-75a5f5cfc593-kube-api-access-fdc4k\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-p5g5b\" (UID: \"4583cb4e-c70d-4638-a948-75a5f5cfc593\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-p5g5b" Feb 16 11:52:59 crc kubenswrapper[4949]: I0216 11:52:59.166385 4949 generic.go:334] "Generic (PLEG): container finished" podID="7b6a888c-2896-4125-9947-9a31b5091820" containerID="22d58cfaf62459d2d190731a0d6387f6f3a584bfdd4740a0f01b237c3c96a9d3" exitCode=0 Feb 16 11:52:59 crc kubenswrapper[4949]: I0216 11:52:59.166439 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z7dbc" event={"ID":"7b6a888c-2896-4125-9947-9a31b5091820","Type":"ContainerDied","Data":"22d58cfaf62459d2d190731a0d6387f6f3a584bfdd4740a0f01b237c3c96a9d3"} Feb 16 11:52:59 crc kubenswrapper[4949]: I0216 11:52:59.166467 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z7dbc" event={"ID":"7b6a888c-2896-4125-9947-9a31b5091820","Type":"ContainerStarted","Data":"f7aeec26a8216c4de50a619ecb2e0ed1c35308e0575bd2fbbdc021565e6e6d47"} Feb 16 11:52:59 crc kubenswrapper[4949]: I0216 11:52:59.255558 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4583cb4e-c70d-4638-a948-75a5f5cfc593-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-p5g5b\" (UID: \"4583cb4e-c70d-4638-a948-75a5f5cfc593\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-p5g5b" Feb 16 11:52:59 crc kubenswrapper[4949]: I0216 11:52:59.255928 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fdc4k\" (UniqueName: \"kubernetes.io/projected/4583cb4e-c70d-4638-a948-75a5f5cfc593-kube-api-access-fdc4k\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-p5g5b\" (UID: \"4583cb4e-c70d-4638-a948-75a5f5cfc593\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-p5g5b" Feb 16 11:52:59 crc kubenswrapper[4949]: I0216 11:52:59.256074 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/4583cb4e-c70d-4638-a948-75a5f5cfc593-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-p5g5b\" (UID: \"4583cb4e-c70d-4638-a948-75a5f5cfc593\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-p5g5b" Feb 16 11:52:59 crc kubenswrapper[4949]: I0216 11:52:59.276101 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fdc4k\" (UniqueName: \"kubernetes.io/projected/4583cb4e-c70d-4638-a948-75a5f5cfc593-kube-api-access-fdc4k\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-p5g5b\" (UID: \"4583cb4e-c70d-4638-a948-75a5f5cfc593\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-p5g5b" Feb 16 11:52:59 crc kubenswrapper[4949]: I0216 11:52:59.276903 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4583cb4e-c70d-4638-a948-75a5f5cfc593-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-p5g5b\" (UID: \"4583cb4e-c70d-4638-a948-75a5f5cfc593\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-p5g5b" Feb 16 11:52:59 crc kubenswrapper[4949]: I0216 11:52:59.282724 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/4583cb4e-c70d-4638-a948-75a5f5cfc593-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-p5g5b\" (UID: \"4583cb4e-c70d-4638-a948-75a5f5cfc593\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-p5g5b" Feb 16 11:52:59 crc kubenswrapper[4949]: I0216 11:52:59.391160 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-p5g5b" Feb 16 11:53:00 crc kubenswrapper[4949]: I0216 11:53:00.011209 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-p5g5b"] Feb 16 11:53:00 crc kubenswrapper[4949]: I0216 11:53:00.239328 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-p5g5b" event={"ID":"4583cb4e-c70d-4638-a948-75a5f5cfc593","Type":"ContainerStarted","Data":"306f0444a22348b30309331450fe35c9570e1341e191e4aa800b2d35349c593b"} Feb 16 11:53:01 crc kubenswrapper[4949]: I0216 11:53:01.249886 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-p5g5b" event={"ID":"4583cb4e-c70d-4638-a948-75a5f5cfc593","Type":"ContainerStarted","Data":"fbe0215a914da3a46ba22035bed8caa512c42afa3c1d94670173b94c2034b582"} Feb 16 11:53:01 crc kubenswrapper[4949]: I0216 11:53:01.255189 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z7dbc" event={"ID":"7b6a888c-2896-4125-9947-9a31b5091820","Type":"ContainerStarted","Data":"a40b74c6f51f10971a99e430d997bc57c8ed1a0089d2d4aa42564dc9a835898a"} Feb 16 11:53:01 crc kubenswrapper[4949]: I0216 11:53:01.283207 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-p5g5b" podStartSLOduration=1.849603833 podStartE2EDuration="2.283165858s" podCreationTimestamp="2026-02-16 11:52:59 +0000 UTC" firstStartedPulling="2026-02-16 11:53:00.028927556 +0000 UTC m=+2769.658261721" lastFinishedPulling="2026-02-16 11:53:00.462489571 +0000 UTC m=+2770.091823746" observedRunningTime="2026-02-16 11:53:01.275364055 +0000 UTC m=+2770.904698210" watchObservedRunningTime="2026-02-16 11:53:01.283165858 +0000 UTC m=+2770.912500023" Feb 16 11:53:02 crc kubenswrapper[4949]: I0216 11:53:02.271566 4949 generic.go:334] "Generic (PLEG): container finished" podID="7b6a888c-2896-4125-9947-9a31b5091820" containerID="a40b74c6f51f10971a99e430d997bc57c8ed1a0089d2d4aa42564dc9a835898a" exitCode=0 Feb 16 11:53:02 crc kubenswrapper[4949]: I0216 11:53:02.271876 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z7dbc" event={"ID":"7b6a888c-2896-4125-9947-9a31b5091820","Type":"ContainerDied","Data":"a40b74c6f51f10971a99e430d997bc57c8ed1a0089d2d4aa42564dc9a835898a"} Feb 16 11:53:03 crc kubenswrapper[4949]: I0216 11:53:03.295097 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z7dbc" event={"ID":"7b6a888c-2896-4125-9947-9a31b5091820","Type":"ContainerStarted","Data":"63fdaeaf291462cf8592403d8323a71f8176ad622c1af32adfbf63c51a4f48ea"} Feb 16 11:53:03 crc kubenswrapper[4949]: I0216 11:53:03.328903 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-z7dbc" podStartSLOduration=2.797719247 podStartE2EDuration="6.328881453s" 
podCreationTimestamp="2026-02-16 11:52:57 +0000 UTC" firstStartedPulling="2026-02-16 11:52:59.171878442 +0000 UTC m=+2768.801212607" lastFinishedPulling="2026-02-16 11:53:02.703040648 +0000 UTC m=+2772.332374813" observedRunningTime="2026-02-16 11:53:03.317622271 +0000 UTC m=+2772.946956436" watchObservedRunningTime="2026-02-16 11:53:03.328881453 +0000 UTC m=+2772.958215618" Feb 16 11:53:04 crc kubenswrapper[4949]: I0216 11:53:04.550064 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 16 11:53:04 crc kubenswrapper[4949]: I0216 11:53:04.550452 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 16 11:53:07 crc kubenswrapper[4949]: I0216 11:53:07.848099 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-z7dbc" Feb 16 11:53:07 crc kubenswrapper[4949]: I0216 11:53:07.848736 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-z7dbc" Feb 16 11:53:07 crc kubenswrapper[4949]: I0216 11:53:07.910708 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-z7dbc" Feb 16 11:53:08 crc kubenswrapper[4949]: I0216 11:53:08.395368 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-z7dbc" Feb 16 11:53:08 crc kubenswrapper[4949]: I0216 11:53:08.469899 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-z7dbc"] Feb 16 11:53:10 crc kubenswrapper[4949]: E0216 11:53:10.239320 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:53:10 crc kubenswrapper[4949]: I0216 11:53:10.367073 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-z7dbc" podUID="7b6a888c-2896-4125-9947-9a31b5091820" containerName="registry-server" containerID="cri-o://63fdaeaf291462cf8592403d8323a71f8176ad622c1af32adfbf63c51a4f48ea" gracePeriod=2 Feb 16 11:53:11 crc kubenswrapper[4949]: I0216 11:53:11.029913 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-z7dbc" Feb 16 11:53:11 crc kubenswrapper[4949]: I0216 11:53:11.218915 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7b6a888c-2896-4125-9947-9a31b5091820-catalog-content\") pod \"7b6a888c-2896-4125-9947-9a31b5091820\" (UID: \"7b6a888c-2896-4125-9947-9a31b5091820\") " Feb 16 11:53:11 crc kubenswrapper[4949]: I0216 11:53:11.218990 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7b6a888c-2896-4125-9947-9a31b5091820-utilities\") pod \"7b6a888c-2896-4125-9947-9a31b5091820\" (UID: \"7b6a888c-2896-4125-9947-9a31b5091820\") " Feb 16 11:53:11 crc kubenswrapper[4949]: I0216 11:53:11.219249 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-42zs5\" (UniqueName: \"kubernetes.io/projected/7b6a888c-2896-4125-9947-9a31b5091820-kube-api-access-42zs5\") pod \"7b6a888c-2896-4125-9947-9a31b5091820\" (UID: \"7b6a888c-2896-4125-9947-9a31b5091820\") " Feb 16 11:53:11 crc kubenswrapper[4949]: I0216 11:53:11.221686 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7b6a888c-2896-4125-9947-9a31b5091820-utilities" (OuterVolumeSpecName: "utilities") pod "7b6a888c-2896-4125-9947-9a31b5091820" (UID: "7b6a888c-2896-4125-9947-9a31b5091820"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:53:11 crc kubenswrapper[4949]: I0216 11:53:11.225137 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7b6a888c-2896-4125-9947-9a31b5091820-kube-api-access-42zs5" (OuterVolumeSpecName: "kube-api-access-42zs5") pod "7b6a888c-2896-4125-9947-9a31b5091820" (UID: "7b6a888c-2896-4125-9947-9a31b5091820"). InnerVolumeSpecName "kube-api-access-42zs5". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:53:11 crc kubenswrapper[4949]: E0216 11:53:11.250309 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:53:11 crc kubenswrapper[4949]: I0216 11:53:11.275671 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7b6a888c-2896-4125-9947-9a31b5091820-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7b6a888c-2896-4125-9947-9a31b5091820" (UID: "7b6a888c-2896-4125-9947-9a31b5091820"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:53:11 crc kubenswrapper[4949]: I0216 11:53:11.323609 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-42zs5\" (UniqueName: \"kubernetes.io/projected/7b6a888c-2896-4125-9947-9a31b5091820-kube-api-access-42zs5\") on node \"crc\" DevicePath \"\"" Feb 16 11:53:11 crc kubenswrapper[4949]: I0216 11:53:11.323652 4949 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7b6a888c-2896-4125-9947-9a31b5091820-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 16 11:53:11 crc kubenswrapper[4949]: I0216 11:53:11.323665 4949 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7b6a888c-2896-4125-9947-9a31b5091820-utilities\") on node \"crc\" DevicePath \"\"" Feb 16 11:53:11 crc kubenswrapper[4949]: I0216 11:53:11.377629 4949 generic.go:334] "Generic (PLEG): container finished" podID="7b6a888c-2896-4125-9947-9a31b5091820" containerID="63fdaeaf291462cf8592403d8323a71f8176ad622c1af32adfbf63c51a4f48ea" exitCode=0 Feb 16 11:53:11 crc kubenswrapper[4949]: I0216 11:53:11.377676 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z7dbc" event={"ID":"7b6a888c-2896-4125-9947-9a31b5091820","Type":"ContainerDied","Data":"63fdaeaf291462cf8592403d8323a71f8176ad622c1af32adfbf63c51a4f48ea"} Feb 16 11:53:11 crc kubenswrapper[4949]: I0216 11:53:11.377707 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z7dbc" event={"ID":"7b6a888c-2896-4125-9947-9a31b5091820","Type":"ContainerDied","Data":"f7aeec26a8216c4de50a619ecb2e0ed1c35308e0575bd2fbbdc021565e6e6d47"} Feb 16 11:53:11 crc kubenswrapper[4949]: I0216 11:53:11.377725 4949 scope.go:117] "RemoveContainer" containerID="63fdaeaf291462cf8592403d8323a71f8176ad622c1af32adfbf63c51a4f48ea" Feb 16 11:53:11 crc kubenswrapper[4949]: I0216 11:53:11.378221 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-z7dbc" Feb 16 11:53:11 crc kubenswrapper[4949]: I0216 11:53:11.398765 4949 scope.go:117] "RemoveContainer" containerID="a40b74c6f51f10971a99e430d997bc57c8ed1a0089d2d4aa42564dc9a835898a" Feb 16 11:53:11 crc kubenswrapper[4949]: I0216 11:53:11.433361 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-z7dbc"] Feb 16 11:53:11 crc kubenswrapper[4949]: I0216 11:53:11.439731 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-z7dbc"] Feb 16 11:53:11 crc kubenswrapper[4949]: I0216 11:53:11.452034 4949 scope.go:117] "RemoveContainer" containerID="22d58cfaf62459d2d190731a0d6387f6f3a584bfdd4740a0f01b237c3c96a9d3" Feb 16 11:53:11 crc kubenswrapper[4949]: I0216 11:53:11.502684 4949 scope.go:117] "RemoveContainer" containerID="63fdaeaf291462cf8592403d8323a71f8176ad622c1af32adfbf63c51a4f48ea" Feb 16 11:53:11 crc kubenswrapper[4949]: E0216 11:53:11.508455 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"63fdaeaf291462cf8592403d8323a71f8176ad622c1af32adfbf63c51a4f48ea\": container with ID starting with 63fdaeaf291462cf8592403d8323a71f8176ad622c1af32adfbf63c51a4f48ea not found: ID does not exist" containerID="63fdaeaf291462cf8592403d8323a71f8176ad622c1af32adfbf63c51a4f48ea" Feb 16 11:53:11 crc kubenswrapper[4949]: I0216 11:53:11.508530 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"63fdaeaf291462cf8592403d8323a71f8176ad622c1af32adfbf63c51a4f48ea"} err="failed to get container status \"63fdaeaf291462cf8592403d8323a71f8176ad622c1af32adfbf63c51a4f48ea\": rpc error: code = NotFound desc = could not find container \"63fdaeaf291462cf8592403d8323a71f8176ad622c1af32adfbf63c51a4f48ea\": container with ID starting with 63fdaeaf291462cf8592403d8323a71f8176ad622c1af32adfbf63c51a4f48ea not found: ID does not exist" Feb 16 11:53:11 crc kubenswrapper[4949]: I0216 11:53:11.508571 4949 scope.go:117] "RemoveContainer" containerID="a40b74c6f51f10971a99e430d997bc57c8ed1a0089d2d4aa42564dc9a835898a" Feb 16 11:53:11 crc kubenswrapper[4949]: E0216 11:53:11.509142 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a40b74c6f51f10971a99e430d997bc57c8ed1a0089d2d4aa42564dc9a835898a\": container with ID starting with a40b74c6f51f10971a99e430d997bc57c8ed1a0089d2d4aa42564dc9a835898a not found: ID does not exist" containerID="a40b74c6f51f10971a99e430d997bc57c8ed1a0089d2d4aa42564dc9a835898a" Feb 16 11:53:11 crc kubenswrapper[4949]: I0216 11:53:11.509200 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a40b74c6f51f10971a99e430d997bc57c8ed1a0089d2d4aa42564dc9a835898a"} err="failed to get container status \"a40b74c6f51f10971a99e430d997bc57c8ed1a0089d2d4aa42564dc9a835898a\": rpc error: code = NotFound desc = could not find container \"a40b74c6f51f10971a99e430d997bc57c8ed1a0089d2d4aa42564dc9a835898a\": container with ID starting with a40b74c6f51f10971a99e430d997bc57c8ed1a0089d2d4aa42564dc9a835898a not found: ID does not exist" Feb 16 11:53:11 crc kubenswrapper[4949]: I0216 11:53:11.509228 4949 scope.go:117] "RemoveContainer" containerID="22d58cfaf62459d2d190731a0d6387f6f3a584bfdd4740a0f01b237c3c96a9d3" Feb 16 11:53:11 crc kubenswrapper[4949]: E0216 11:53:11.509675 4949 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"22d58cfaf62459d2d190731a0d6387f6f3a584bfdd4740a0f01b237c3c96a9d3\": container with ID starting with 22d58cfaf62459d2d190731a0d6387f6f3a584bfdd4740a0f01b237c3c96a9d3 not found: ID does not exist" containerID="22d58cfaf62459d2d190731a0d6387f6f3a584bfdd4740a0f01b237c3c96a9d3" Feb 16 11:53:11 crc kubenswrapper[4949]: I0216 11:53:11.509710 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"22d58cfaf62459d2d190731a0d6387f6f3a584bfdd4740a0f01b237c3c96a9d3"} err="failed to get container status \"22d58cfaf62459d2d190731a0d6387f6f3a584bfdd4740a0f01b237c3c96a9d3\": rpc error: code = NotFound desc = could not find container \"22d58cfaf62459d2d190731a0d6387f6f3a584bfdd4740a0f01b237c3c96a9d3\": container with ID starting with 22d58cfaf62459d2d190731a0d6387f6f3a584bfdd4740a0f01b237c3c96a9d3 not found: ID does not exist" Feb 16 11:53:13 crc kubenswrapper[4949]: I0216 11:53:13.250622 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7b6a888c-2896-4125-9947-9a31b5091820" path="/var/lib/kubelet/pods/7b6a888c-2896-4125-9947-9a31b5091820/volumes" Feb 16 11:53:25 crc kubenswrapper[4949]: E0216 11:53:25.237596 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:53:25 crc kubenswrapper[4949]: E0216 11:53:25.238096 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:53:34 crc kubenswrapper[4949]: I0216 11:53:34.550611 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 16 11:53:34 crc kubenswrapper[4949]: I0216 11:53:34.551135 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 16 11:53:36 crc kubenswrapper[4949]: E0216 11:53:36.237251 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:53:36 crc kubenswrapper[4949]: E0216 11:53:36.237871 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 
11:53:47 crc kubenswrapper[4949]: E0216 11:53:47.238068 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:53:51 crc kubenswrapper[4949]: E0216 11:53:51.268977 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:54:00 crc kubenswrapper[4949]: E0216 11:54:00.238240 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:54:04 crc kubenswrapper[4949]: I0216 11:54:04.550686 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 16 11:54:04 crc kubenswrapper[4949]: I0216 11:54:04.551329 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 16 11:54:04 crc kubenswrapper[4949]: I0216 11:54:04.551386 4949 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-26lss" Feb 16 11:54:04 crc kubenswrapper[4949]: I0216 11:54:04.552480 4949 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b2547e29673da8edc36fedc7e79471a698f99d8dbbbcf9abc8645e00ea426f97"} pod="openshift-machine-config-operator/machine-config-daemon-26lss" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 16 11:54:04 crc kubenswrapper[4949]: I0216 11:54:04.552559 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" containerID="cri-o://b2547e29673da8edc36fedc7e79471a698f99d8dbbbcf9abc8645e00ea426f97" gracePeriod=600 Feb 16 11:54:05 crc kubenswrapper[4949]: I0216 11:54:05.022331 4949 generic.go:334] "Generic (PLEG): container finished" podID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerID="b2547e29673da8edc36fedc7e79471a698f99d8dbbbcf9abc8645e00ea426f97" exitCode=0 Feb 16 11:54:05 crc kubenswrapper[4949]: I0216 11:54:05.023882 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" 
event={"ID":"39ca5ab7-457c-4404-a3eb-f6acce74843b","Type":"ContainerDied","Data":"b2547e29673da8edc36fedc7e79471a698f99d8dbbbcf9abc8645e00ea426f97"} Feb 16 11:54:05 crc kubenswrapper[4949]: I0216 11:54:05.024461 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" event={"ID":"39ca5ab7-457c-4404-a3eb-f6acce74843b","Type":"ContainerStarted","Data":"35cd40897a1d60a6cb43b2203374997bed301e6499f7eb175eceea009e21eff6"} Feb 16 11:54:05 crc kubenswrapper[4949]: I0216 11:54:05.024587 4949 scope.go:117] "RemoveContainer" containerID="739a990adef2b23deb9d2adfe3c94bdae210ea27ced100a67c5d19e922815eb4" Feb 16 11:54:05 crc kubenswrapper[4949]: E0216 11:54:05.237102 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:54:13 crc kubenswrapper[4949]: E0216 11:54:13.238159 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:54:19 crc kubenswrapper[4949]: E0216 11:54:19.240034 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:54:27 crc kubenswrapper[4949]: E0216 11:54:27.238453 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:54:30 crc kubenswrapper[4949]: E0216 11:54:30.238563 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:54:38 crc kubenswrapper[4949]: E0216 11:54:38.239568 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:54:42 crc kubenswrapper[4949]: E0216 11:54:42.237287 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:54:50 crc kubenswrapper[4949]: E0216 11:54:50.237442 4949 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:54:53 crc kubenswrapper[4949]: E0216 11:54:53.238358 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:55:05 crc kubenswrapper[4949]: E0216 11:55:05.237444 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:55:07 crc kubenswrapper[4949]: E0216 11:55:07.238324 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:55:16 crc kubenswrapper[4949]: E0216 11:55:16.238697 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:55:20 crc kubenswrapper[4949]: E0216 11:55:20.238676 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:55:31 crc kubenswrapper[4949]: E0216 11:55:31.247672 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:55:34 crc kubenswrapper[4949]: E0216 11:55:34.240645 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:55:47 crc kubenswrapper[4949]: E0216 11:55:47.243328 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" 
podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:55:49 crc kubenswrapper[4949]: E0216 11:55:49.239290 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:55:59 crc kubenswrapper[4949]: E0216 11:55:59.238936 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:56:02 crc kubenswrapper[4949]: E0216 11:56:02.240267 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:56:04 crc kubenswrapper[4949]: I0216 11:56:04.550072 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 16 11:56:04 crc kubenswrapper[4949]: I0216 11:56:04.550392 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 16 11:56:13 crc kubenswrapper[4949]: E0216 11:56:13.237828 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:56:15 crc kubenswrapper[4949]: E0216 11:56:15.239755 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:56:26 crc kubenswrapper[4949]: I0216 11:56:26.238446 4949 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 16 11:56:26 crc kubenswrapper[4949]: E0216 11:56:26.365984 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 16 11:56:26 crc kubenswrapper[4949]: E0216 11:56:26.366056 4949 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 16 11:56:26 crc kubenswrapper[4949]: E0216 11:56:26.366229 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n674h5dh7bh65bhcch65chc4h547h5d4h5c7h5dch5c8h74hb9h5f4hd8h79h7h59bh559h56bh9bhbch67bh68bh575h5cbh658h5bch7bhcch5d9q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8k7p7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(c69d7379-6f2b-45ae-8972-71e223a337a8): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" logger="UnhandledError" Feb 16 11:56:26 crc kubenswrapper[4949]: E0216 11:56:26.367090 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 16 11:56:26 crc kubenswrapper[4949]: E0216 11:56:26.367147 4949 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 16 11:56:26 crc kubenswrapper[4949]: E0216 11:56:26.367284 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ksbml,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-5lgds_openstack(a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in 
quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 16 11:56:26 crc kubenswrapper[4949]: E0216 11:56:26.367363 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:56:26 crc kubenswrapper[4949]: E0216 11:56:26.369329 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:56:34 crc kubenswrapper[4949]: I0216 11:56:34.550324 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 16 11:56:34 crc kubenswrapper[4949]: I0216 11:56:34.551115 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 16 11:56:39 crc kubenswrapper[4949]: E0216 11:56:39.240272 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:56:40 crc kubenswrapper[4949]: E0216 11:56:40.244733 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:56:51 crc kubenswrapper[4949]: E0216 11:56:51.258477 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:56:55 crc kubenswrapper[4949]: E0216 11:56:55.241867 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:57:04 crc kubenswrapper[4949]: E0216 11:57:04.239376 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:57:04 crc kubenswrapper[4949]: I0216 11:57:04.550446 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 16 11:57:04 crc kubenswrapper[4949]: I0216 11:57:04.550804 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 16 11:57:04 crc kubenswrapper[4949]: I0216 11:57:04.550865 4949 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-26lss" Feb 16 11:57:04 crc kubenswrapper[4949]: I0216 11:57:04.552318 4949 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"35cd40897a1d60a6cb43b2203374997bed301e6499f7eb175eceea009e21eff6"} pod="openshift-machine-config-operator/machine-config-daemon-26lss" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 16 11:57:04 crc kubenswrapper[4949]: I0216 11:57:04.552427 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" containerID="cri-o://35cd40897a1d60a6cb43b2203374997bed301e6499f7eb175eceea009e21eff6" gracePeriod=600 Feb 16 11:57:04 crc kubenswrapper[4949]: E0216 11:57:04.677337 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:57:05 crc kubenswrapper[4949]: I0216 11:57:05.575121 4949 generic.go:334] "Generic (PLEG): container finished" podID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerID="35cd40897a1d60a6cb43b2203374997bed301e6499f7eb175eceea009e21eff6" exitCode=0 Feb 16 11:57:05 crc kubenswrapper[4949]: I0216 11:57:05.575184 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" event={"ID":"39ca5ab7-457c-4404-a3eb-f6acce74843b","Type":"ContainerDied","Data":"35cd40897a1d60a6cb43b2203374997bed301e6499f7eb175eceea009e21eff6"} Feb 16 11:57:05 crc kubenswrapper[4949]: I0216 11:57:05.575230 4949 scope.go:117] "RemoveContainer" 
containerID="b2547e29673da8edc36fedc7e79471a698f99d8dbbbcf9abc8645e00ea426f97" Feb 16 11:57:05 crc kubenswrapper[4949]: I0216 11:57:05.576160 4949 scope.go:117] "RemoveContainer" containerID="35cd40897a1d60a6cb43b2203374997bed301e6499f7eb175eceea009e21eff6" Feb 16 11:57:05 crc kubenswrapper[4949]: E0216 11:57:05.576585 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:57:08 crc kubenswrapper[4949]: E0216 11:57:08.238622 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:57:17 crc kubenswrapper[4949]: I0216 11:57:17.236239 4949 scope.go:117] "RemoveContainer" containerID="35cd40897a1d60a6cb43b2203374997bed301e6499f7eb175eceea009e21eff6" Feb 16 11:57:17 crc kubenswrapper[4949]: E0216 11:57:17.236972 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:57:17 crc kubenswrapper[4949]: E0216 11:57:17.238809 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:57:20 crc kubenswrapper[4949]: E0216 11:57:20.240633 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:57:25 crc kubenswrapper[4949]: I0216 11:57:25.758460 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-k5vqd"] Feb 16 11:57:25 crc kubenswrapper[4949]: E0216 11:57:25.759694 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b6a888c-2896-4125-9947-9a31b5091820" containerName="extract-content" Feb 16 11:57:25 crc kubenswrapper[4949]: I0216 11:57:25.759712 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b6a888c-2896-4125-9947-9a31b5091820" containerName="extract-content" Feb 16 11:57:25 crc kubenswrapper[4949]: E0216 11:57:25.759740 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b6a888c-2896-4125-9947-9a31b5091820" containerName="extract-utilities" Feb 16 11:57:25 crc kubenswrapper[4949]: I0216 11:57:25.759749 4949 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="7b6a888c-2896-4125-9947-9a31b5091820" containerName="extract-utilities" Feb 16 11:57:25 crc kubenswrapper[4949]: E0216 11:57:25.759779 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b6a888c-2896-4125-9947-9a31b5091820" containerName="registry-server" Feb 16 11:57:25 crc kubenswrapper[4949]: I0216 11:57:25.759787 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b6a888c-2896-4125-9947-9a31b5091820" containerName="registry-server" Feb 16 11:57:25 crc kubenswrapper[4949]: I0216 11:57:25.760065 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b6a888c-2896-4125-9947-9a31b5091820" containerName="registry-server" Feb 16 11:57:25 crc kubenswrapper[4949]: I0216 11:57:25.762309 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-k5vqd" Feb 16 11:57:25 crc kubenswrapper[4949]: I0216 11:57:25.774395 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-k5vqd"] Feb 16 11:57:25 crc kubenswrapper[4949]: I0216 11:57:25.852876 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0d6418c9-7112-4a2e-a8b0-6447b9dddaf9-utilities\") pod \"redhat-marketplace-k5vqd\" (UID: \"0d6418c9-7112-4a2e-a8b0-6447b9dddaf9\") " pod="openshift-marketplace/redhat-marketplace-k5vqd" Feb 16 11:57:25 crc kubenswrapper[4949]: I0216 11:57:25.853034 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p8rbs\" (UniqueName: \"kubernetes.io/projected/0d6418c9-7112-4a2e-a8b0-6447b9dddaf9-kube-api-access-p8rbs\") pod \"redhat-marketplace-k5vqd\" (UID: \"0d6418c9-7112-4a2e-a8b0-6447b9dddaf9\") " pod="openshift-marketplace/redhat-marketplace-k5vqd" Feb 16 11:57:25 crc kubenswrapper[4949]: I0216 11:57:25.853195 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0d6418c9-7112-4a2e-a8b0-6447b9dddaf9-catalog-content\") pod \"redhat-marketplace-k5vqd\" (UID: \"0d6418c9-7112-4a2e-a8b0-6447b9dddaf9\") " pod="openshift-marketplace/redhat-marketplace-k5vqd" Feb 16 11:57:25 crc kubenswrapper[4949]: I0216 11:57:25.956333 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0d6418c9-7112-4a2e-a8b0-6447b9dddaf9-utilities\") pod \"redhat-marketplace-k5vqd\" (UID: \"0d6418c9-7112-4a2e-a8b0-6447b9dddaf9\") " pod="openshift-marketplace/redhat-marketplace-k5vqd" Feb 16 11:57:25 crc kubenswrapper[4949]: I0216 11:57:25.956484 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p8rbs\" (UniqueName: \"kubernetes.io/projected/0d6418c9-7112-4a2e-a8b0-6447b9dddaf9-kube-api-access-p8rbs\") pod \"redhat-marketplace-k5vqd\" (UID: \"0d6418c9-7112-4a2e-a8b0-6447b9dddaf9\") " pod="openshift-marketplace/redhat-marketplace-k5vqd" Feb 16 11:57:25 crc kubenswrapper[4949]: I0216 11:57:25.956580 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0d6418c9-7112-4a2e-a8b0-6447b9dddaf9-catalog-content\") pod \"redhat-marketplace-k5vqd\" (UID: \"0d6418c9-7112-4a2e-a8b0-6447b9dddaf9\") " pod="openshift-marketplace/redhat-marketplace-k5vqd" Feb 16 11:57:25 crc kubenswrapper[4949]: I0216 11:57:25.957460 4949 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0d6418c9-7112-4a2e-a8b0-6447b9dddaf9-catalog-content\") pod \"redhat-marketplace-k5vqd\" (UID: \"0d6418c9-7112-4a2e-a8b0-6447b9dddaf9\") " pod="openshift-marketplace/redhat-marketplace-k5vqd" Feb 16 11:57:25 crc kubenswrapper[4949]: I0216 11:57:25.957745 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0d6418c9-7112-4a2e-a8b0-6447b9dddaf9-utilities\") pod \"redhat-marketplace-k5vqd\" (UID: \"0d6418c9-7112-4a2e-a8b0-6447b9dddaf9\") " pod="openshift-marketplace/redhat-marketplace-k5vqd" Feb 16 11:57:25 crc kubenswrapper[4949]: I0216 11:57:25.980535 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p8rbs\" (UniqueName: \"kubernetes.io/projected/0d6418c9-7112-4a2e-a8b0-6447b9dddaf9-kube-api-access-p8rbs\") pod \"redhat-marketplace-k5vqd\" (UID: \"0d6418c9-7112-4a2e-a8b0-6447b9dddaf9\") " pod="openshift-marketplace/redhat-marketplace-k5vqd" Feb 16 11:57:26 crc kubenswrapper[4949]: I0216 11:57:26.106002 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-k5vqd" Feb 16 11:57:26 crc kubenswrapper[4949]: I0216 11:57:26.628400 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-k5vqd"] Feb 16 11:57:26 crc kubenswrapper[4949]: I0216 11:57:26.838460 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-k5vqd" event={"ID":"0d6418c9-7112-4a2e-a8b0-6447b9dddaf9","Type":"ContainerStarted","Data":"bdbe4c762545bd42a0da48644f544c9f6a70cc09403ba2f160db59132e2e4cc6"} Feb 16 11:57:27 crc kubenswrapper[4949]: I0216 11:57:27.854430 4949 generic.go:334] "Generic (PLEG): container finished" podID="0d6418c9-7112-4a2e-a8b0-6447b9dddaf9" containerID="23c4b843a08d967cdbf3bdab0bb01e94718a9307a0ecab583322d781b7ffd8f9" exitCode=0 Feb 16 11:57:27 crc kubenswrapper[4949]: I0216 11:57:27.854488 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-k5vqd" event={"ID":"0d6418c9-7112-4a2e-a8b0-6447b9dddaf9","Type":"ContainerDied","Data":"23c4b843a08d967cdbf3bdab0bb01e94718a9307a0ecab583322d781b7ffd8f9"} Feb 16 11:57:28 crc kubenswrapper[4949]: E0216 11:57:28.236452 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:57:28 crc kubenswrapper[4949]: I0216 11:57:28.867068 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-k5vqd" event={"ID":"0d6418c9-7112-4a2e-a8b0-6447b9dddaf9","Type":"ContainerStarted","Data":"6e72769bc5ab160d3fae3dadf1f49c3240b8ec22c226812df420b2f40adbbf7e"} Feb 16 11:57:29 crc kubenswrapper[4949]: I0216 11:57:29.878426 4949 generic.go:334] "Generic (PLEG): container finished" podID="0d6418c9-7112-4a2e-a8b0-6447b9dddaf9" containerID="6e72769bc5ab160d3fae3dadf1f49c3240b8ec22c226812df420b2f40adbbf7e" exitCode=0 Feb 16 11:57:29 crc kubenswrapper[4949]: I0216 11:57:29.878480 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-k5vqd" 
event={"ID":"0d6418c9-7112-4a2e-a8b0-6447b9dddaf9","Type":"ContainerDied","Data":"6e72769bc5ab160d3fae3dadf1f49c3240b8ec22c226812df420b2f40adbbf7e"} Feb 16 11:57:30 crc kubenswrapper[4949]: I0216 11:57:30.235352 4949 scope.go:117] "RemoveContainer" containerID="35cd40897a1d60a6cb43b2203374997bed301e6499f7eb175eceea009e21eff6" Feb 16 11:57:30 crc kubenswrapper[4949]: E0216 11:57:30.235935 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:57:30 crc kubenswrapper[4949]: I0216 11:57:30.892240 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-k5vqd" event={"ID":"0d6418c9-7112-4a2e-a8b0-6447b9dddaf9","Type":"ContainerStarted","Data":"191b87b3d71af2e381580f53c8ec515353b0826c9fa832f92e10de0fd840cf4e"} Feb 16 11:57:30 crc kubenswrapper[4949]: I0216 11:57:30.928647 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-k5vqd" podStartSLOduration=3.500433741 podStartE2EDuration="5.928621708s" podCreationTimestamp="2026-02-16 11:57:25 +0000 UTC" firstStartedPulling="2026-02-16 11:57:27.856596311 +0000 UTC m=+3037.485930476" lastFinishedPulling="2026-02-16 11:57:30.284784258 +0000 UTC m=+3039.914118443" observedRunningTime="2026-02-16 11:57:30.913832868 +0000 UTC m=+3040.543167033" watchObservedRunningTime="2026-02-16 11:57:30.928621708 +0000 UTC m=+3040.557955893" Feb 16 11:57:34 crc kubenswrapper[4949]: E0216 11:57:34.238475 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:57:36 crc kubenswrapper[4949]: I0216 11:57:36.111652 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-k5vqd" Feb 16 11:57:36 crc kubenswrapper[4949]: I0216 11:57:36.112063 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-k5vqd" Feb 16 11:57:36 crc kubenswrapper[4949]: I0216 11:57:36.178141 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-k5vqd" Feb 16 11:57:37 crc kubenswrapper[4949]: I0216 11:57:37.040611 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-k5vqd" Feb 16 11:57:39 crc kubenswrapper[4949]: I0216 11:57:39.723110 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-k5vqd"] Feb 16 11:57:39 crc kubenswrapper[4949]: I0216 11:57:39.723642 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-k5vqd" podUID="0d6418c9-7112-4a2e-a8b0-6447b9dddaf9" containerName="registry-server" containerID="cri-o://191b87b3d71af2e381580f53c8ec515353b0826c9fa832f92e10de0fd840cf4e" gracePeriod=2 Feb 16 11:57:40 crc kubenswrapper[4949]: I0216 11:57:40.006897 4949 
generic.go:334] "Generic (PLEG): container finished" podID="0d6418c9-7112-4a2e-a8b0-6447b9dddaf9" containerID="191b87b3d71af2e381580f53c8ec515353b0826c9fa832f92e10de0fd840cf4e" exitCode=0 Feb 16 11:57:40 crc kubenswrapper[4949]: I0216 11:57:40.006971 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-k5vqd" event={"ID":"0d6418c9-7112-4a2e-a8b0-6447b9dddaf9","Type":"ContainerDied","Data":"191b87b3d71af2e381580f53c8ec515353b0826c9fa832f92e10de0fd840cf4e"} Feb 16 11:57:40 crc kubenswrapper[4949]: E0216 11:57:40.245327 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:57:40 crc kubenswrapper[4949]: I0216 11:57:40.347516 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-k5vqd" Feb 16 11:57:40 crc kubenswrapper[4949]: I0216 11:57:40.353311 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0d6418c9-7112-4a2e-a8b0-6447b9dddaf9-catalog-content\") pod \"0d6418c9-7112-4a2e-a8b0-6447b9dddaf9\" (UID: \"0d6418c9-7112-4a2e-a8b0-6447b9dddaf9\") " Feb 16 11:57:40 crc kubenswrapper[4949]: I0216 11:57:40.353452 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p8rbs\" (UniqueName: \"kubernetes.io/projected/0d6418c9-7112-4a2e-a8b0-6447b9dddaf9-kube-api-access-p8rbs\") pod \"0d6418c9-7112-4a2e-a8b0-6447b9dddaf9\" (UID: \"0d6418c9-7112-4a2e-a8b0-6447b9dddaf9\") " Feb 16 11:57:40 crc kubenswrapper[4949]: I0216 11:57:40.353536 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0d6418c9-7112-4a2e-a8b0-6447b9dddaf9-utilities\") pod \"0d6418c9-7112-4a2e-a8b0-6447b9dddaf9\" (UID: \"0d6418c9-7112-4a2e-a8b0-6447b9dddaf9\") " Feb 16 11:57:40 crc kubenswrapper[4949]: I0216 11:57:40.355277 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0d6418c9-7112-4a2e-a8b0-6447b9dddaf9-utilities" (OuterVolumeSpecName: "utilities") pod "0d6418c9-7112-4a2e-a8b0-6447b9dddaf9" (UID: "0d6418c9-7112-4a2e-a8b0-6447b9dddaf9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:57:40 crc kubenswrapper[4949]: I0216 11:57:40.362931 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d6418c9-7112-4a2e-a8b0-6447b9dddaf9-kube-api-access-p8rbs" (OuterVolumeSpecName: "kube-api-access-p8rbs") pod "0d6418c9-7112-4a2e-a8b0-6447b9dddaf9" (UID: "0d6418c9-7112-4a2e-a8b0-6447b9dddaf9"). InnerVolumeSpecName "kube-api-access-p8rbs". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:57:40 crc kubenswrapper[4949]: I0216 11:57:40.397111 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0d6418c9-7112-4a2e-a8b0-6447b9dddaf9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0d6418c9-7112-4a2e-a8b0-6447b9dddaf9" (UID: "0d6418c9-7112-4a2e-a8b0-6447b9dddaf9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 11:57:40 crc kubenswrapper[4949]: I0216 11:57:40.457373 4949 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0d6418c9-7112-4a2e-a8b0-6447b9dddaf9-utilities\") on node \"crc\" DevicePath \"\"" Feb 16 11:57:40 crc kubenswrapper[4949]: I0216 11:57:40.457399 4949 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0d6418c9-7112-4a2e-a8b0-6447b9dddaf9-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 16 11:57:40 crc kubenswrapper[4949]: I0216 11:57:40.457412 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p8rbs\" (UniqueName: \"kubernetes.io/projected/0d6418c9-7112-4a2e-a8b0-6447b9dddaf9-kube-api-access-p8rbs\") on node \"crc\" DevicePath \"\"" Feb 16 11:57:41 crc kubenswrapper[4949]: I0216 11:57:41.036920 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-k5vqd" event={"ID":"0d6418c9-7112-4a2e-a8b0-6447b9dddaf9","Type":"ContainerDied","Data":"bdbe4c762545bd42a0da48644f544c9f6a70cc09403ba2f160db59132e2e4cc6"} Feb 16 11:57:41 crc kubenswrapper[4949]: I0216 11:57:41.037260 4949 scope.go:117] "RemoveContainer" containerID="191b87b3d71af2e381580f53c8ec515353b0826c9fa832f92e10de0fd840cf4e" Feb 16 11:57:41 crc kubenswrapper[4949]: I0216 11:57:41.037086 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-k5vqd" Feb 16 11:57:41 crc kubenswrapper[4949]: I0216 11:57:41.103059 4949 scope.go:117] "RemoveContainer" containerID="6e72769bc5ab160d3fae3dadf1f49c3240b8ec22c226812df420b2f40adbbf7e" Feb 16 11:57:41 crc kubenswrapper[4949]: I0216 11:57:41.111819 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-k5vqd"] Feb 16 11:57:41 crc kubenswrapper[4949]: I0216 11:57:41.122103 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-k5vqd"] Feb 16 11:57:41 crc kubenswrapper[4949]: I0216 11:57:41.134563 4949 scope.go:117] "RemoveContainer" containerID="23c4b843a08d967cdbf3bdab0bb01e94718a9307a0ecab583322d781b7ffd8f9" Feb 16 11:57:41 crc kubenswrapper[4949]: I0216 11:57:41.246670 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0d6418c9-7112-4a2e-a8b0-6447b9dddaf9" path="/var/lib/kubelet/pods/0d6418c9-7112-4a2e-a8b0-6447b9dddaf9/volumes" Feb 16 11:57:42 crc kubenswrapper[4949]: I0216 11:57:42.236112 4949 scope.go:117] "RemoveContainer" containerID="35cd40897a1d60a6cb43b2203374997bed301e6499f7eb175eceea009e21eff6" Feb 16 11:57:42 crc kubenswrapper[4949]: E0216 11:57:42.236796 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:57:48 crc kubenswrapper[4949]: E0216 11:57:48.240752 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" 
podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:57:53 crc kubenswrapper[4949]: I0216 11:57:53.254824 4949 scope.go:117] "RemoveContainer" containerID="35cd40897a1d60a6cb43b2203374997bed301e6499f7eb175eceea009e21eff6" Feb 16 11:57:53 crc kubenswrapper[4949]: E0216 11:57:53.260823 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:57:55 crc kubenswrapper[4949]: E0216 11:57:55.237955 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:58:00 crc kubenswrapper[4949]: E0216 11:58:00.243714 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:58:04 crc kubenswrapper[4949]: I0216 11:58:04.236043 4949 scope.go:117] "RemoveContainer" containerID="35cd40897a1d60a6cb43b2203374997bed301e6499f7eb175eceea009e21eff6" Feb 16 11:58:04 crc kubenswrapper[4949]: E0216 11:58:04.238754 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:58:10 crc kubenswrapper[4949]: E0216 11:58:10.238517 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:58:11 crc kubenswrapper[4949]: E0216 11:58:11.248900 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:58:15 crc kubenswrapper[4949]: I0216 11:58:15.235847 4949 scope.go:117] "RemoveContainer" containerID="35cd40897a1d60a6cb43b2203374997bed301e6499f7eb175eceea009e21eff6" Feb 16 11:58:15 crc kubenswrapper[4949]: E0216 11:58:15.236762 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:58:23 crc kubenswrapper[4949]: E0216 11:58:23.240257 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:58:24 crc kubenswrapper[4949]: E0216 11:58:24.238198 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:58:27 crc kubenswrapper[4949]: I0216 11:58:27.235552 4949 scope.go:117] "RemoveContainer" containerID="35cd40897a1d60a6cb43b2203374997bed301e6499f7eb175eceea009e21eff6" Feb 16 11:58:27 crc kubenswrapper[4949]: E0216 11:58:27.236476 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:58:38 crc kubenswrapper[4949]: E0216 11:58:38.237714 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:58:39 crc kubenswrapper[4949]: I0216 11:58:39.236320 4949 scope.go:117] "RemoveContainer" containerID="35cd40897a1d60a6cb43b2203374997bed301e6499f7eb175eceea009e21eff6" Feb 16 11:58:39 crc kubenswrapper[4949]: E0216 11:58:39.237117 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:58:39 crc kubenswrapper[4949]: E0216 11:58:39.239043 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:58:51 crc kubenswrapper[4949]: E0216 11:58:51.255550 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" 
podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:58:53 crc kubenswrapper[4949]: I0216 11:58:53.236132 4949 scope.go:117] "RemoveContainer" containerID="35cd40897a1d60a6cb43b2203374997bed301e6499f7eb175eceea009e21eff6" Feb 16 11:58:53 crc kubenswrapper[4949]: E0216 11:58:53.236947 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:58:53 crc kubenswrapper[4949]: E0216 11:58:53.238458 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:59:04 crc kubenswrapper[4949]: E0216 11:59:04.237562 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:59:04 crc kubenswrapper[4949]: E0216 11:59:04.238583 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:59:06 crc kubenswrapper[4949]: I0216 11:59:06.235832 4949 scope.go:117] "RemoveContainer" containerID="35cd40897a1d60a6cb43b2203374997bed301e6499f7eb175eceea009e21eff6" Feb 16 11:59:06 crc kubenswrapper[4949]: E0216 11:59:06.236405 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:59:16 crc kubenswrapper[4949]: I0216 11:59:16.152047 4949 generic.go:334] "Generic (PLEG): container finished" podID="4583cb4e-c70d-4638-a948-75a5f5cfc593" containerID="fbe0215a914da3a46ba22035bed8caa512c42afa3c1d94670173b94c2034b582" exitCode=2 Feb 16 11:59:16 crc kubenswrapper[4949]: I0216 11:59:16.152080 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-p5g5b" event={"ID":"4583cb4e-c70d-4638-a948-75a5f5cfc593","Type":"ContainerDied","Data":"fbe0215a914da3a46ba22035bed8caa512c42afa3c1d94670173b94c2034b582"} Feb 16 11:59:17 crc kubenswrapper[4949]: I0216 11:59:17.681574 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-p5g5b" Feb 16 11:59:17 crc kubenswrapper[4949]: I0216 11:59:17.770859 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4583cb4e-c70d-4638-a948-75a5f5cfc593-inventory\") pod \"4583cb4e-c70d-4638-a948-75a5f5cfc593\" (UID: \"4583cb4e-c70d-4638-a948-75a5f5cfc593\") " Feb 16 11:59:17 crc kubenswrapper[4949]: I0216 11:59:17.771469 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/4583cb4e-c70d-4638-a948-75a5f5cfc593-ssh-key-openstack-edpm-ipam\") pod \"4583cb4e-c70d-4638-a948-75a5f5cfc593\" (UID: \"4583cb4e-c70d-4638-a948-75a5f5cfc593\") " Feb 16 11:59:17 crc kubenswrapper[4949]: I0216 11:59:17.771520 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fdc4k\" (UniqueName: \"kubernetes.io/projected/4583cb4e-c70d-4638-a948-75a5f5cfc593-kube-api-access-fdc4k\") pod \"4583cb4e-c70d-4638-a948-75a5f5cfc593\" (UID: \"4583cb4e-c70d-4638-a948-75a5f5cfc593\") " Feb 16 11:59:17 crc kubenswrapper[4949]: I0216 11:59:17.789079 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4583cb4e-c70d-4638-a948-75a5f5cfc593-kube-api-access-fdc4k" (OuterVolumeSpecName: "kube-api-access-fdc4k") pod "4583cb4e-c70d-4638-a948-75a5f5cfc593" (UID: "4583cb4e-c70d-4638-a948-75a5f5cfc593"). InnerVolumeSpecName "kube-api-access-fdc4k". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 11:59:17 crc kubenswrapper[4949]: I0216 11:59:17.811143 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4583cb4e-c70d-4638-a948-75a5f5cfc593-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "4583cb4e-c70d-4638-a948-75a5f5cfc593" (UID: "4583cb4e-c70d-4638-a948-75a5f5cfc593"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:59:17 crc kubenswrapper[4949]: I0216 11:59:17.814888 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4583cb4e-c70d-4638-a948-75a5f5cfc593-inventory" (OuterVolumeSpecName: "inventory") pod "4583cb4e-c70d-4638-a948-75a5f5cfc593" (UID: "4583cb4e-c70d-4638-a948-75a5f5cfc593"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 11:59:17 crc kubenswrapper[4949]: I0216 11:59:17.874666 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fdc4k\" (UniqueName: \"kubernetes.io/projected/4583cb4e-c70d-4638-a948-75a5f5cfc593-kube-api-access-fdc4k\") on node \"crc\" DevicePath \"\"" Feb 16 11:59:17 crc kubenswrapper[4949]: I0216 11:59:17.874706 4949 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4583cb4e-c70d-4638-a948-75a5f5cfc593-inventory\") on node \"crc\" DevicePath \"\"" Feb 16 11:59:17 crc kubenswrapper[4949]: I0216 11:59:17.874717 4949 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/4583cb4e-c70d-4638-a948-75a5f5cfc593-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Feb 16 11:59:18 crc kubenswrapper[4949]: I0216 11:59:18.180481 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-p5g5b" event={"ID":"4583cb4e-c70d-4638-a948-75a5f5cfc593","Type":"ContainerDied","Data":"306f0444a22348b30309331450fe35c9570e1341e191e4aa800b2d35349c593b"} Feb 16 11:59:18 crc kubenswrapper[4949]: I0216 11:59:18.180525 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-p5g5b" Feb 16 11:59:18 crc kubenswrapper[4949]: I0216 11:59:18.180548 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="306f0444a22348b30309331450fe35c9570e1341e191e4aa800b2d35349c593b" Feb 16 11:59:19 crc kubenswrapper[4949]: E0216 11:59:19.244214 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:59:19 crc kubenswrapper[4949]: E0216 11:59:19.244759 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:59:21 crc kubenswrapper[4949]: I0216 11:59:21.244424 4949 scope.go:117] "RemoveContainer" containerID="35cd40897a1d60a6cb43b2203374997bed301e6499f7eb175eceea009e21eff6" Feb 16 11:59:21 crc kubenswrapper[4949]: E0216 11:59:21.245499 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:59:32 crc kubenswrapper[4949]: E0216 11:59:32.238803 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:59:33 crc 
kubenswrapper[4949]: E0216 11:59:33.237656 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:59:36 crc kubenswrapper[4949]: I0216 11:59:36.235729 4949 scope.go:117] "RemoveContainer" containerID="35cd40897a1d60a6cb43b2203374997bed301e6499f7eb175eceea009e21eff6" Feb 16 11:59:36 crc kubenswrapper[4949]: E0216 11:59:36.236268 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:59:43 crc kubenswrapper[4949]: E0216 11:59:43.239607 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:59:45 crc kubenswrapper[4949]: E0216 11:59:45.239398 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 11:59:49 crc kubenswrapper[4949]: I0216 11:59:49.248290 4949 scope.go:117] "RemoveContainer" containerID="35cd40897a1d60a6cb43b2203374997bed301e6499f7eb175eceea009e21eff6" Feb 16 11:59:49 crc kubenswrapper[4949]: E0216 11:59:49.250681 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 11:59:55 crc kubenswrapper[4949]: I0216 11:59:55.048127 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gqs69"] Feb 16 11:59:55 crc kubenswrapper[4949]: E0216 11:59:55.049664 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d6418c9-7112-4a2e-a8b0-6447b9dddaf9" containerName="registry-server" Feb 16 11:59:55 crc kubenswrapper[4949]: I0216 11:59:55.049689 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d6418c9-7112-4a2e-a8b0-6447b9dddaf9" containerName="registry-server" Feb 16 11:59:55 crc kubenswrapper[4949]: E0216 11:59:55.049711 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4583cb4e-c70d-4638-a948-75a5f5cfc593" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Feb 16 11:59:55 crc kubenswrapper[4949]: I0216 11:59:55.049725 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="4583cb4e-c70d-4638-a948-75a5f5cfc593" 
containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Feb 16 11:59:55 crc kubenswrapper[4949]: E0216 11:59:55.049750 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d6418c9-7112-4a2e-a8b0-6447b9dddaf9" containerName="extract-content" Feb 16 11:59:55 crc kubenswrapper[4949]: I0216 11:59:55.049761 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d6418c9-7112-4a2e-a8b0-6447b9dddaf9" containerName="extract-content" Feb 16 11:59:55 crc kubenswrapper[4949]: E0216 11:59:55.049791 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d6418c9-7112-4a2e-a8b0-6447b9dddaf9" containerName="extract-utilities" Feb 16 11:59:55 crc kubenswrapper[4949]: I0216 11:59:55.049803 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d6418c9-7112-4a2e-a8b0-6447b9dddaf9" containerName="extract-utilities" Feb 16 11:59:55 crc kubenswrapper[4949]: I0216 11:59:55.050222 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="4583cb4e-c70d-4638-a948-75a5f5cfc593" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Feb 16 11:59:55 crc kubenswrapper[4949]: I0216 11:59:55.050280 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d6418c9-7112-4a2e-a8b0-6447b9dddaf9" containerName="registry-server" Feb 16 11:59:55 crc kubenswrapper[4949]: I0216 11:59:55.051923 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gqs69" Feb 16 11:59:55 crc kubenswrapper[4949]: I0216 11:59:55.056038 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Feb 16 11:59:55 crc kubenswrapper[4949]: I0216 11:59:55.056256 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-g89vn" Feb 16 11:59:55 crc kubenswrapper[4949]: I0216 11:59:55.056047 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Feb 16 11:59:55 crc kubenswrapper[4949]: I0216 11:59:55.058874 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Feb 16 11:59:55 crc kubenswrapper[4949]: I0216 11:59:55.080542 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gqs69"] Feb 16 11:59:55 crc kubenswrapper[4949]: I0216 11:59:55.203874 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/9991be76-b16a-4afd-bcc6-05dc7dfe9da1-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-gqs69\" (UID: \"9991be76-b16a-4afd-bcc6-05dc7dfe9da1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gqs69" Feb 16 11:59:55 crc kubenswrapper[4949]: I0216 11:59:55.204012 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qvh8f\" (UniqueName: \"kubernetes.io/projected/9991be76-b16a-4afd-bcc6-05dc7dfe9da1-kube-api-access-qvh8f\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-gqs69\" (UID: \"9991be76-b16a-4afd-bcc6-05dc7dfe9da1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gqs69" Feb 16 11:59:55 crc kubenswrapper[4949]: I0216 11:59:55.204433 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9991be76-b16a-4afd-bcc6-05dc7dfe9da1-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-gqs69\" (UID: \"9991be76-b16a-4afd-bcc6-05dc7dfe9da1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gqs69" Feb 16 11:59:55 crc kubenswrapper[4949]: E0216 11:59:55.236649 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 11:59:55 crc kubenswrapper[4949]: I0216 11:59:55.306430 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/9991be76-b16a-4afd-bcc6-05dc7dfe9da1-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-gqs69\" (UID: \"9991be76-b16a-4afd-bcc6-05dc7dfe9da1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gqs69" Feb 16 11:59:55 crc kubenswrapper[4949]: I0216 11:59:55.306570 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qvh8f\" (UniqueName: \"kubernetes.io/projected/9991be76-b16a-4afd-bcc6-05dc7dfe9da1-kube-api-access-qvh8f\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-gqs69\" (UID: \"9991be76-b16a-4afd-bcc6-05dc7dfe9da1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gqs69" Feb 16 11:59:55 crc kubenswrapper[4949]: I0216 11:59:55.307635 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9991be76-b16a-4afd-bcc6-05dc7dfe9da1-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-gqs69\" (UID: \"9991be76-b16a-4afd-bcc6-05dc7dfe9da1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gqs69" Feb 16 11:59:55 crc kubenswrapper[4949]: I0216 11:59:55.314267 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9991be76-b16a-4afd-bcc6-05dc7dfe9da1-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-gqs69\" (UID: \"9991be76-b16a-4afd-bcc6-05dc7dfe9da1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gqs69" Feb 16 11:59:55 crc kubenswrapper[4949]: I0216 11:59:55.321293 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/9991be76-b16a-4afd-bcc6-05dc7dfe9da1-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-gqs69\" (UID: \"9991be76-b16a-4afd-bcc6-05dc7dfe9da1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gqs69" Feb 16 11:59:55 crc kubenswrapper[4949]: I0216 11:59:55.324831 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qvh8f\" (UniqueName: \"kubernetes.io/projected/9991be76-b16a-4afd-bcc6-05dc7dfe9da1-kube-api-access-qvh8f\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-gqs69\" (UID: \"9991be76-b16a-4afd-bcc6-05dc7dfe9da1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gqs69" Feb 16 11:59:55 crc kubenswrapper[4949]: I0216 11:59:55.378145 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gqs69" Feb 16 11:59:56 crc kubenswrapper[4949]: I0216 11:59:56.015052 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gqs69"] Feb 16 11:59:56 crc kubenswrapper[4949]: I0216 11:59:56.600692 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gqs69" event={"ID":"9991be76-b16a-4afd-bcc6-05dc7dfe9da1","Type":"ContainerStarted","Data":"02adfd99d962ebbcad3229e7d386472ce1e7e1a777cfb50867412bd19fc2b9f5"} Feb 16 11:59:57 crc kubenswrapper[4949]: I0216 11:59:57.616396 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gqs69" event={"ID":"9991be76-b16a-4afd-bcc6-05dc7dfe9da1","Type":"ContainerStarted","Data":"0954ddbd1f76cbc204485986054c5e07737f0ee159361a4b92e157d0ae2799f9"} Feb 16 11:59:57 crc kubenswrapper[4949]: I0216 11:59:57.649377 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gqs69" podStartSLOduration=2.100087228 podStartE2EDuration="2.649342426s" podCreationTimestamp="2026-02-16 11:59:55 +0000 UTC" firstStartedPulling="2026-02-16 11:59:56.035098303 +0000 UTC m=+3185.664432468" lastFinishedPulling="2026-02-16 11:59:56.584353501 +0000 UTC m=+3186.213687666" observedRunningTime="2026-02-16 11:59:57.639032344 +0000 UTC m=+3187.268366529" watchObservedRunningTime="2026-02-16 11:59:57.649342426 +0000 UTC m=+3187.278676631" Feb 16 11:59:59 crc kubenswrapper[4949]: E0216 11:59:59.239159 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:00:00 crc kubenswrapper[4949]: I0216 12:00:00.150530 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29520720-svnnl"] Feb 16 12:00:00 crc kubenswrapper[4949]: I0216 12:00:00.153004 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29520720-svnnl" Feb 16 12:00:00 crc kubenswrapper[4949]: I0216 12:00:00.156039 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Feb 16 12:00:00 crc kubenswrapper[4949]: I0216 12:00:00.156313 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Feb 16 12:00:00 crc kubenswrapper[4949]: I0216 12:00:00.165415 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29520720-svnnl"] Feb 16 12:00:00 crc kubenswrapper[4949]: I0216 12:00:00.245701 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/845dbfa2-42a8-4303-9679-b5e238392546-config-volume\") pod \"collect-profiles-29520720-svnnl\" (UID: \"845dbfa2-42a8-4303-9679-b5e238392546\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520720-svnnl" Feb 16 12:00:00 crc kubenswrapper[4949]: I0216 12:00:00.245975 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/845dbfa2-42a8-4303-9679-b5e238392546-secret-volume\") pod \"collect-profiles-29520720-svnnl\" (UID: \"845dbfa2-42a8-4303-9679-b5e238392546\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520720-svnnl" Feb 16 12:00:00 crc kubenswrapper[4949]: I0216 12:00:00.246041 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sdsqw\" (UniqueName: \"kubernetes.io/projected/845dbfa2-42a8-4303-9679-b5e238392546-kube-api-access-sdsqw\") pod \"collect-profiles-29520720-svnnl\" (UID: \"845dbfa2-42a8-4303-9679-b5e238392546\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520720-svnnl" Feb 16 12:00:00 crc kubenswrapper[4949]: I0216 12:00:00.348037 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/845dbfa2-42a8-4303-9679-b5e238392546-secret-volume\") pod \"collect-profiles-29520720-svnnl\" (UID: \"845dbfa2-42a8-4303-9679-b5e238392546\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520720-svnnl" Feb 16 12:00:00 crc kubenswrapper[4949]: I0216 12:00:00.348138 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sdsqw\" (UniqueName: \"kubernetes.io/projected/845dbfa2-42a8-4303-9679-b5e238392546-kube-api-access-sdsqw\") pod \"collect-profiles-29520720-svnnl\" (UID: \"845dbfa2-42a8-4303-9679-b5e238392546\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520720-svnnl" Feb 16 12:00:00 crc kubenswrapper[4949]: I0216 12:00:00.348240 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/845dbfa2-42a8-4303-9679-b5e238392546-config-volume\") pod \"collect-profiles-29520720-svnnl\" (UID: \"845dbfa2-42a8-4303-9679-b5e238392546\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520720-svnnl" Feb 16 12:00:00 crc kubenswrapper[4949]: I0216 12:00:00.349628 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/845dbfa2-42a8-4303-9679-b5e238392546-config-volume\") pod 
\"collect-profiles-29520720-svnnl\" (UID: \"845dbfa2-42a8-4303-9679-b5e238392546\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520720-svnnl" Feb 16 12:00:00 crc kubenswrapper[4949]: I0216 12:00:00.356030 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/845dbfa2-42a8-4303-9679-b5e238392546-secret-volume\") pod \"collect-profiles-29520720-svnnl\" (UID: \"845dbfa2-42a8-4303-9679-b5e238392546\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520720-svnnl" Feb 16 12:00:00 crc kubenswrapper[4949]: I0216 12:00:00.364844 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sdsqw\" (UniqueName: \"kubernetes.io/projected/845dbfa2-42a8-4303-9679-b5e238392546-kube-api-access-sdsqw\") pod \"collect-profiles-29520720-svnnl\" (UID: \"845dbfa2-42a8-4303-9679-b5e238392546\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520720-svnnl" Feb 16 12:00:00 crc kubenswrapper[4949]: I0216 12:00:00.480964 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29520720-svnnl" Feb 16 12:00:00 crc kubenswrapper[4949]: I0216 12:00:00.955159 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29520720-svnnl"] Feb 16 12:00:01 crc kubenswrapper[4949]: I0216 12:00:01.671944 4949 generic.go:334] "Generic (PLEG): container finished" podID="845dbfa2-42a8-4303-9679-b5e238392546" containerID="fd592049b83cf64e5528128ba158c16671ee833698c87b4d9ab48188e9c5aa83" exitCode=0 Feb 16 12:00:01 crc kubenswrapper[4949]: I0216 12:00:01.672060 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29520720-svnnl" event={"ID":"845dbfa2-42a8-4303-9679-b5e238392546","Type":"ContainerDied","Data":"fd592049b83cf64e5528128ba158c16671ee833698c87b4d9ab48188e9c5aa83"} Feb 16 12:00:01 crc kubenswrapper[4949]: I0216 12:00:01.672961 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29520720-svnnl" event={"ID":"845dbfa2-42a8-4303-9679-b5e238392546","Type":"ContainerStarted","Data":"0db45bb266226d0a4fd46bf3c508e0451ec61cab9491aa6fe2bd06447a383d0b"} Feb 16 12:00:03 crc kubenswrapper[4949]: I0216 12:00:03.072594 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29520720-svnnl" Feb 16 12:00:03 crc kubenswrapper[4949]: I0216 12:00:03.229879 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/845dbfa2-42a8-4303-9679-b5e238392546-config-volume\") pod \"845dbfa2-42a8-4303-9679-b5e238392546\" (UID: \"845dbfa2-42a8-4303-9679-b5e238392546\") " Feb 16 12:00:03 crc kubenswrapper[4949]: I0216 12:00:03.230440 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/845dbfa2-42a8-4303-9679-b5e238392546-secret-volume\") pod \"845dbfa2-42a8-4303-9679-b5e238392546\" (UID: \"845dbfa2-42a8-4303-9679-b5e238392546\") " Feb 16 12:00:03 crc kubenswrapper[4949]: I0216 12:00:03.230491 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sdsqw\" (UniqueName: \"kubernetes.io/projected/845dbfa2-42a8-4303-9679-b5e238392546-kube-api-access-sdsqw\") pod \"845dbfa2-42a8-4303-9679-b5e238392546\" (UID: \"845dbfa2-42a8-4303-9679-b5e238392546\") " Feb 16 12:00:03 crc kubenswrapper[4949]: I0216 12:00:03.233144 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/845dbfa2-42a8-4303-9679-b5e238392546-config-volume" (OuterVolumeSpecName: "config-volume") pod "845dbfa2-42a8-4303-9679-b5e238392546" (UID: "845dbfa2-42a8-4303-9679-b5e238392546"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 12:00:03 crc kubenswrapper[4949]: I0216 12:00:03.239579 4949 scope.go:117] "RemoveContainer" containerID="35cd40897a1d60a6cb43b2203374997bed301e6499f7eb175eceea009e21eff6" Feb 16 12:00:03 crc kubenswrapper[4949]: E0216 12:00:03.240361 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:00:03 crc kubenswrapper[4949]: I0216 12:00:03.242550 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/845dbfa2-42a8-4303-9679-b5e238392546-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "845dbfa2-42a8-4303-9679-b5e238392546" (UID: "845dbfa2-42a8-4303-9679-b5e238392546"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 12:00:03 crc kubenswrapper[4949]: I0216 12:00:03.254979 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/845dbfa2-42a8-4303-9679-b5e238392546-kube-api-access-sdsqw" (OuterVolumeSpecName: "kube-api-access-sdsqw") pod "845dbfa2-42a8-4303-9679-b5e238392546" (UID: "845dbfa2-42a8-4303-9679-b5e238392546"). InnerVolumeSpecName "kube-api-access-sdsqw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 12:00:03 crc kubenswrapper[4949]: I0216 12:00:03.333648 4949 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/845dbfa2-42a8-4303-9679-b5e238392546-secret-volume\") on node \"crc\" DevicePath \"\"" Feb 16 12:00:03 crc kubenswrapper[4949]: I0216 12:00:03.333695 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sdsqw\" (UniqueName: \"kubernetes.io/projected/845dbfa2-42a8-4303-9679-b5e238392546-kube-api-access-sdsqw\") on node \"crc\" DevicePath \"\"" Feb 16 12:00:03 crc kubenswrapper[4949]: I0216 12:00:03.333713 4949 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/845dbfa2-42a8-4303-9679-b5e238392546-config-volume\") on node \"crc\" DevicePath \"\"" Feb 16 12:00:03 crc kubenswrapper[4949]: I0216 12:00:03.696832 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29520720-svnnl" event={"ID":"845dbfa2-42a8-4303-9679-b5e238392546","Type":"ContainerDied","Data":"0db45bb266226d0a4fd46bf3c508e0451ec61cab9491aa6fe2bd06447a383d0b"} Feb 16 12:00:03 crc kubenswrapper[4949]: I0216 12:00:03.696873 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0db45bb266226d0a4fd46bf3c508e0451ec61cab9491aa6fe2bd06447a383d0b" Feb 16 12:00:03 crc kubenswrapper[4949]: I0216 12:00:03.696933 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29520720-svnnl" Feb 16 12:00:04 crc kubenswrapper[4949]: I0216 12:00:04.159956 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29520675-fcng6"] Feb 16 12:00:04 crc kubenswrapper[4949]: I0216 12:00:04.171889 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29520675-fcng6"] Feb 16 12:00:05 crc kubenswrapper[4949]: I0216 12:00:05.251684 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a9d0f7d5-6713-4be3-8daf-551e1647ad78" path="/var/lib/kubelet/pods/a9d0f7d5-6713-4be3-8daf-551e1647ad78/volumes" Feb 16 12:00:09 crc kubenswrapper[4949]: E0216 12:00:09.237105 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:00:11 crc kubenswrapper[4949]: E0216 12:00:11.255962 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:00:14 crc kubenswrapper[4949]: I0216 12:00:14.236685 4949 scope.go:117] "RemoveContainer" containerID="35cd40897a1d60a6cb43b2203374997bed301e6499f7eb175eceea009e21eff6" Feb 16 12:00:14 crc kubenswrapper[4949]: E0216 12:00:14.239106 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:00:21 crc kubenswrapper[4949]: E0216 12:00:21.249914 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:00:22 crc kubenswrapper[4949]: E0216 12:00:22.238616 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:00:29 crc kubenswrapper[4949]: I0216 12:00:29.236575 4949 scope.go:117] "RemoveContainer" containerID="35cd40897a1d60a6cb43b2203374997bed301e6499f7eb175eceea009e21eff6" Feb 16 12:00:29 crc kubenswrapper[4949]: E0216 12:00:29.237485 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:00:36 crc kubenswrapper[4949]: E0216 12:00:36.237518 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:00:37 crc kubenswrapper[4949]: E0216 12:00:37.237159 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:00:44 crc kubenswrapper[4949]: I0216 12:00:44.235607 4949 scope.go:117] "RemoveContainer" containerID="35cd40897a1d60a6cb43b2203374997bed301e6499f7eb175eceea009e21eff6" Feb 16 12:00:44 crc kubenswrapper[4949]: E0216 12:00:44.237094 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:00:48 crc kubenswrapper[4949]: E0216 12:00:48.238044 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" 
podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:00:49 crc kubenswrapper[4949]: E0216 12:00:49.244277 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:00:55 crc kubenswrapper[4949]: I0216 12:00:55.235647 4949 scope.go:117] "RemoveContainer" containerID="35cd40897a1d60a6cb43b2203374997bed301e6499f7eb175eceea009e21eff6" Feb 16 12:00:55 crc kubenswrapper[4949]: E0216 12:00:55.236579 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:00:58 crc kubenswrapper[4949]: I0216 12:00:58.930548 4949 scope.go:117] "RemoveContainer" containerID="3ec6469594d830601a5cc016e458f152574f921a55ce547dc9339c1b930a81ee" Feb 16 12:01:00 crc kubenswrapper[4949]: I0216 12:01:00.189467 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29520721-mvkww"] Feb 16 12:01:00 crc kubenswrapper[4949]: E0216 12:01:00.190628 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="845dbfa2-42a8-4303-9679-b5e238392546" containerName="collect-profiles" Feb 16 12:01:00 crc kubenswrapper[4949]: I0216 12:01:00.190650 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="845dbfa2-42a8-4303-9679-b5e238392546" containerName="collect-profiles" Feb 16 12:01:00 crc kubenswrapper[4949]: I0216 12:01:00.191089 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="845dbfa2-42a8-4303-9679-b5e238392546" containerName="collect-profiles" Feb 16 12:01:00 crc kubenswrapper[4949]: I0216 12:01:00.192532 4949 util.go:30] "No sandbox for pod can be found. 
Feb 16 12:01:00 crc kubenswrapper[4949]: I0216 12:01:00.201639 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29520721-mvkww"]
Feb 16 12:01:00 crc kubenswrapper[4949]: I0216 12:01:00.328361 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d40cbc9a-bb66-4817-9728-c46d635e4ed9-combined-ca-bundle\") pod \"keystone-cron-29520721-mvkww\" (UID: \"d40cbc9a-bb66-4817-9728-c46d635e4ed9\") " pod="openstack/keystone-cron-29520721-mvkww"
Feb 16 12:01:00 crc kubenswrapper[4949]: I0216 12:01:00.328841 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d40cbc9a-bb66-4817-9728-c46d635e4ed9-fernet-keys\") pod \"keystone-cron-29520721-mvkww\" (UID: \"d40cbc9a-bb66-4817-9728-c46d635e4ed9\") " pod="openstack/keystone-cron-29520721-mvkww"
Feb 16 12:01:00 crc kubenswrapper[4949]: I0216 12:01:00.329215 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d40cbc9a-bb66-4817-9728-c46d635e4ed9-config-data\") pod \"keystone-cron-29520721-mvkww\" (UID: \"d40cbc9a-bb66-4817-9728-c46d635e4ed9\") " pod="openstack/keystone-cron-29520721-mvkww"
Feb 16 12:01:00 crc kubenswrapper[4949]: I0216 12:01:00.329906 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w7fhl\" (UniqueName: \"kubernetes.io/projected/d40cbc9a-bb66-4817-9728-c46d635e4ed9-kube-api-access-w7fhl\") pod \"keystone-cron-29520721-mvkww\" (UID: \"d40cbc9a-bb66-4817-9728-c46d635e4ed9\") " pod="openstack/keystone-cron-29520721-mvkww"
Feb 16 12:01:00 crc kubenswrapper[4949]: I0216 12:01:00.432020 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d40cbc9a-bb66-4817-9728-c46d635e4ed9-fernet-keys\") pod \"keystone-cron-29520721-mvkww\" (UID: \"d40cbc9a-bb66-4817-9728-c46d635e4ed9\") " pod="openstack/keystone-cron-29520721-mvkww"
Feb 16 12:01:00 crc kubenswrapper[4949]: I0216 12:01:00.432101 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d40cbc9a-bb66-4817-9728-c46d635e4ed9-config-data\") pod \"keystone-cron-29520721-mvkww\" (UID: \"d40cbc9a-bb66-4817-9728-c46d635e4ed9\") " pod="openstack/keystone-cron-29520721-mvkww"
Feb 16 12:01:00 crc kubenswrapper[4949]: I0216 12:01:00.432217 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w7fhl\" (UniqueName: \"kubernetes.io/projected/d40cbc9a-bb66-4817-9728-c46d635e4ed9-kube-api-access-w7fhl\") pod \"keystone-cron-29520721-mvkww\" (UID: \"d40cbc9a-bb66-4817-9728-c46d635e4ed9\") " pod="openstack/keystone-cron-29520721-mvkww"
Feb 16 12:01:00 crc kubenswrapper[4949]: I0216 12:01:00.432271 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d40cbc9a-bb66-4817-9728-c46d635e4ed9-combined-ca-bundle\") pod \"keystone-cron-29520721-mvkww\" (UID: \"d40cbc9a-bb66-4817-9728-c46d635e4ed9\") " pod="openstack/keystone-cron-29520721-mvkww"
Feb 16 12:01:00 crc kubenswrapper[4949]: I0216 12:01:00.438489 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d40cbc9a-bb66-4817-9728-c46d635e4ed9-combined-ca-bundle\") pod \"keystone-cron-29520721-mvkww\" (UID: \"d40cbc9a-bb66-4817-9728-c46d635e4ed9\") " pod="openstack/keystone-cron-29520721-mvkww"
Feb 16 12:01:00 crc kubenswrapper[4949]: I0216 12:01:00.439058 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d40cbc9a-bb66-4817-9728-c46d635e4ed9-fernet-keys\") pod \"keystone-cron-29520721-mvkww\" (UID: \"d40cbc9a-bb66-4817-9728-c46d635e4ed9\") " pod="openstack/keystone-cron-29520721-mvkww"
Feb 16 12:01:00 crc kubenswrapper[4949]: I0216 12:01:00.443946 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d40cbc9a-bb66-4817-9728-c46d635e4ed9-config-data\") pod \"keystone-cron-29520721-mvkww\" (UID: \"d40cbc9a-bb66-4817-9728-c46d635e4ed9\") " pod="openstack/keystone-cron-29520721-mvkww"
Feb 16 12:01:00 crc kubenswrapper[4949]: I0216 12:01:00.461555 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w7fhl\" (UniqueName: \"kubernetes.io/projected/d40cbc9a-bb66-4817-9728-c46d635e4ed9-kube-api-access-w7fhl\") pod \"keystone-cron-29520721-mvkww\" (UID: \"d40cbc9a-bb66-4817-9728-c46d635e4ed9\") " pod="openstack/keystone-cron-29520721-mvkww"
Feb 16 12:01:00 crc kubenswrapper[4949]: I0216 12:01:00.539719 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29520721-mvkww"
Feb 16 12:01:01 crc kubenswrapper[4949]: I0216 12:01:01.094736 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29520721-mvkww"]
Feb 16 12:01:01 crc kubenswrapper[4949]: W0216 12:01:01.099655 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd40cbc9a_bb66_4817_9728_c46d635e4ed9.slice/crio-f2a10884a8d6457b7be0cbaf173b0c60d96e3f316b1e56c5a336d2eed74a022d WatchSource:0}: Error finding container f2a10884a8d6457b7be0cbaf173b0c60d96e3f316b1e56c5a336d2eed74a022d: Status 404 returned error can't find the container with id f2a10884a8d6457b7be0cbaf173b0c60d96e3f316b1e56c5a336d2eed74a022d
Feb 16 12:01:01 crc kubenswrapper[4949]: I0216 12:01:01.357827 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29520721-mvkww" event={"ID":"d40cbc9a-bb66-4817-9728-c46d635e4ed9","Type":"ContainerStarted","Data":"76bc5ea2df9f794e935544d1f7abf4fb6d27a06e44b5032350b3385b7af87d75"}
Feb 16 12:01:01 crc kubenswrapper[4949]: I0216 12:01:01.358258 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29520721-mvkww" event={"ID":"d40cbc9a-bb66-4817-9728-c46d635e4ed9","Type":"ContainerStarted","Data":"f2a10884a8d6457b7be0cbaf173b0c60d96e3f316b1e56c5a336d2eed74a022d"}
Feb 16 12:01:01 crc kubenswrapper[4949]: I0216 12:01:01.384488 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29520721-mvkww" podStartSLOduration=1.384469078 podStartE2EDuration="1.384469078s" podCreationTimestamp="2026-02-16 12:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 12:01:01.378220691 +0000 UTC m=+3251.007554906" watchObservedRunningTime="2026-02-16 12:01:01.384469078 +0000 UTC m=+3251.013803243"
Feb 16 12:01:02 crc kubenswrapper[4949]: E0216 12:01:02.246885 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 12:01:02 crc kubenswrapper[4949]: E0216 12:01:02.248109 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 12:01:04 crc kubenswrapper[4949]: I0216 12:01:04.397895 4949 generic.go:334] "Generic (PLEG): container finished" podID="d40cbc9a-bb66-4817-9728-c46d635e4ed9" containerID="76bc5ea2df9f794e935544d1f7abf4fb6d27a06e44b5032350b3385b7af87d75" exitCode=0
Feb 16 12:01:04 crc kubenswrapper[4949]: I0216 12:01:04.398013 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29520721-mvkww" event={"ID":"d40cbc9a-bb66-4817-9728-c46d635e4ed9","Type":"ContainerDied","Data":"76bc5ea2df9f794e935544d1f7abf4fb6d27a06e44b5032350b3385b7af87d75"}
Feb 16 12:01:06 crc kubenswrapper[4949]: I0216 12:01:05.985745 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29520721-mvkww"
Feb 16 12:01:06 crc kubenswrapper[4949]: I0216 12:01:06.080388 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d40cbc9a-bb66-4817-9728-c46d635e4ed9-fernet-keys\") pod \"d40cbc9a-bb66-4817-9728-c46d635e4ed9\" (UID: \"d40cbc9a-bb66-4817-9728-c46d635e4ed9\") "
Feb 16 12:01:06 crc kubenswrapper[4949]: I0216 12:01:06.080731 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d40cbc9a-bb66-4817-9728-c46d635e4ed9-config-data\") pod \"d40cbc9a-bb66-4817-9728-c46d635e4ed9\" (UID: \"d40cbc9a-bb66-4817-9728-c46d635e4ed9\") "
Feb 16 12:01:06 crc kubenswrapper[4949]: I0216 12:01:06.080793 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7fhl\" (UniqueName: \"kubernetes.io/projected/d40cbc9a-bb66-4817-9728-c46d635e4ed9-kube-api-access-w7fhl\") pod \"d40cbc9a-bb66-4817-9728-c46d635e4ed9\" (UID: \"d40cbc9a-bb66-4817-9728-c46d635e4ed9\") "
Feb 16 12:01:06 crc kubenswrapper[4949]: I0216 12:01:06.081106 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d40cbc9a-bb66-4817-9728-c46d635e4ed9-combined-ca-bundle\") pod \"d40cbc9a-bb66-4817-9728-c46d635e4ed9\" (UID: \"d40cbc9a-bb66-4817-9728-c46d635e4ed9\") "
Feb 16 12:01:06 crc kubenswrapper[4949]: I0216 12:01:06.087191 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d40cbc9a-bb66-4817-9728-c46d635e4ed9-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "d40cbc9a-bb66-4817-9728-c46d635e4ed9" (UID: "d40cbc9a-bb66-4817-9728-c46d635e4ed9"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Feb 16 12:01:06 crc kubenswrapper[4949]: I0216 12:01:06.100209 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d40cbc9a-bb66-4817-9728-c46d635e4ed9-kube-api-access-w7fhl" (OuterVolumeSpecName: "kube-api-access-w7fhl") pod "d40cbc9a-bb66-4817-9728-c46d635e4ed9" (UID: "d40cbc9a-bb66-4817-9728-c46d635e4ed9"). InnerVolumeSpecName "kube-api-access-w7fhl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Feb 16 12:01:06 crc kubenswrapper[4949]: I0216 12:01:06.122554 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d40cbc9a-bb66-4817-9728-c46d635e4ed9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d40cbc9a-bb66-4817-9728-c46d635e4ed9" (UID: "d40cbc9a-bb66-4817-9728-c46d635e4ed9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Feb 16 12:01:06 crc kubenswrapper[4949]: I0216 12:01:06.147368 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d40cbc9a-bb66-4817-9728-c46d635e4ed9-config-data" (OuterVolumeSpecName: "config-data") pod "d40cbc9a-bb66-4817-9728-c46d635e4ed9" (UID: "d40cbc9a-bb66-4817-9728-c46d635e4ed9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Feb 16 12:01:06 crc kubenswrapper[4949]: I0216 12:01:06.184012 4949 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d40cbc9a-bb66-4817-9728-c46d635e4ed9-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Feb 16 12:01:06 crc kubenswrapper[4949]: I0216 12:01:06.184043 4949 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d40cbc9a-bb66-4817-9728-c46d635e4ed9-fernet-keys\") on node \"crc\" DevicePath \"\""
Feb 16 12:01:06 crc kubenswrapper[4949]: I0216 12:01:06.184052 4949 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d40cbc9a-bb66-4817-9728-c46d635e4ed9-config-data\") on node \"crc\" DevicePath \"\""
Feb 16 12:01:06 crc kubenswrapper[4949]: I0216 12:01:06.184062 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7fhl\" (UniqueName: \"kubernetes.io/projected/d40cbc9a-bb66-4817-9728-c46d635e4ed9-kube-api-access-w7fhl\") on node \"crc\" DevicePath \"\""
Feb 16 12:01:06 crc kubenswrapper[4949]: I0216 12:01:06.235839 4949 scope.go:117] "RemoveContainer" containerID="35cd40897a1d60a6cb43b2203374997bed301e6499f7eb175eceea009e21eff6"
Feb 16 12:01:06 crc kubenswrapper[4949]: E0216 12:01:06.236126 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b"
Feb 16 12:01:06 crc kubenswrapper[4949]: I0216 12:01:06.431603 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29520721-mvkww" event={"ID":"d40cbc9a-bb66-4817-9728-c46d635e4ed9","Type":"ContainerDied","Data":"f2a10884a8d6457b7be0cbaf173b0c60d96e3f316b1e56c5a336d2eed74a022d"}
Feb 16 12:01:06 crc kubenswrapper[4949]: I0216 12:01:06.431647 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f2a10884a8d6457b7be0cbaf173b0c60d96e3f316b1e56c5a336d2eed74a022d"
"Container not found in pod's containers" containerID="f2a10884a8d6457b7be0cbaf173b0c60d96e3f316b1e56c5a336d2eed74a022d" Feb 16 12:01:06 crc kubenswrapper[4949]: I0216 12:01:06.431666 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29520721-mvkww" Feb 16 12:01:15 crc kubenswrapper[4949]: E0216 12:01:15.239367 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:01:15 crc kubenswrapper[4949]: E0216 12:01:15.239356 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:01:19 crc kubenswrapper[4949]: I0216 12:01:19.236480 4949 scope.go:117] "RemoveContainer" containerID="35cd40897a1d60a6cb43b2203374997bed301e6499f7eb175eceea009e21eff6" Feb 16 12:01:19 crc kubenswrapper[4949]: E0216 12:01:19.237325 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:01:26 crc kubenswrapper[4949]: E0216 12:01:26.238962 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:01:30 crc kubenswrapper[4949]: I0216 12:01:30.238094 4949 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 16 12:01:30 crc kubenswrapper[4949]: E0216 12:01:30.323910 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 16 12:01:30 crc kubenswrapper[4949]: E0216 12:01:30.323973 4949 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 16 12:01:30 crc kubenswrapper[4949]: E0216 12:01:30.324091 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ksbml,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-5lgds_openstack(a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 16 12:01:30 crc kubenswrapper[4949]: E0216 12:01:30.325245 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:01:33 crc kubenswrapper[4949]: I0216 12:01:33.235785 4949 scope.go:117] "RemoveContainer" containerID="35cd40897a1d60a6cb43b2203374997bed301e6499f7eb175eceea009e21eff6" Feb 16 12:01:33 crc kubenswrapper[4949]: E0216 12:01:33.238580 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:01:39 crc kubenswrapper[4949]: E0216 12:01:39.364471 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 16 12:01:39 crc kubenswrapper[4949]: E0216 12:01:39.365893 4949 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 16 12:01:39 crc kubenswrapper[4949]: E0216 12:01:39.366318 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n674h5dh7bh65bhcch65chc4h547h5d4h5c7h5dch5c8h74hb9h5f4hd8h79h7h59bh559h56bh9bhbch67bh68bh575h5cbh658h5bch7bhcch5d9q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8k7p7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(c69d7379-6f2b-45ae-8972-71e223a337a8): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 16 12:01:39 crc kubenswrapper[4949]: E0216 12:01:39.367585 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:01:45 crc kubenswrapper[4949]: E0216 12:01:45.237977 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:01:48 crc kubenswrapper[4949]: I0216 12:01:48.236115 4949 scope.go:117] "RemoveContainer" containerID="35cd40897a1d60a6cb43b2203374997bed301e6499f7eb175eceea009e21eff6" Feb 16 12:01:48 crc kubenswrapper[4949]: E0216 12:01:48.236724 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:01:52 crc kubenswrapper[4949]: E0216 12:01:52.240336 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:01:58 crc kubenswrapper[4949]: E0216 12:01:58.238855 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:02:00 crc kubenswrapper[4949]: I0216 12:02:00.236092 4949 scope.go:117] "RemoveContainer" containerID="35cd40897a1d60a6cb43b2203374997bed301e6499f7eb175eceea009e21eff6" Feb 16 12:02:00 crc kubenswrapper[4949]: E0216 12:02:00.236965 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:02:05 crc kubenswrapper[4949]: E0216 12:02:05.243737 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:02:11 crc kubenswrapper[4949]: E0216 12:02:11.241160 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:02:13 crc kubenswrapper[4949]: I0216 12:02:13.235952 4949 scope.go:117] 
"RemoveContainer" containerID="35cd40897a1d60a6cb43b2203374997bed301e6499f7eb175eceea009e21eff6" Feb 16 12:02:14 crc kubenswrapper[4949]: I0216 12:02:14.256834 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" event={"ID":"39ca5ab7-457c-4404-a3eb-f6acce74843b","Type":"ContainerStarted","Data":"196d4332bced584ab96d6e55bf867c81b178dde9f045c9e3a178bad980c7d4fa"} Feb 16 12:02:17 crc kubenswrapper[4949]: E0216 12:02:17.239428 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:02:24 crc kubenswrapper[4949]: E0216 12:02:24.238373 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:02:32 crc kubenswrapper[4949]: E0216 12:02:32.237736 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:02:38 crc kubenswrapper[4949]: E0216 12:02:38.237681 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:02:46 crc kubenswrapper[4949]: E0216 12:02:46.237260 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:02:51 crc kubenswrapper[4949]: E0216 12:02:51.245620 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:03:00 crc kubenswrapper[4949]: E0216 12:03:00.237077 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:03:00 crc kubenswrapper[4949]: I0216 12:03:00.462333 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-f9r2s"] Feb 16 12:03:00 crc kubenswrapper[4949]: E0216 12:03:00.463005 4949 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="d40cbc9a-bb66-4817-9728-c46d635e4ed9" containerName="keystone-cron" Feb 16 12:03:00 crc kubenswrapper[4949]: I0216 12:03:00.463031 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="d40cbc9a-bb66-4817-9728-c46d635e4ed9" containerName="keystone-cron" Feb 16 12:03:00 crc kubenswrapper[4949]: I0216 12:03:00.463335 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="d40cbc9a-bb66-4817-9728-c46d635e4ed9" containerName="keystone-cron" Feb 16 12:03:00 crc kubenswrapper[4949]: I0216 12:03:00.465491 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-f9r2s" Feb 16 12:03:00 crc kubenswrapper[4949]: I0216 12:03:00.489986 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-f9r2s"] Feb 16 12:03:00 crc kubenswrapper[4949]: I0216 12:03:00.575521 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6a13bbe6-0a94-4302-b276-5e643f747183-utilities\") pod \"community-operators-f9r2s\" (UID: \"6a13bbe6-0a94-4302-b276-5e643f747183\") " pod="openshift-marketplace/community-operators-f9r2s" Feb 16 12:03:00 crc kubenswrapper[4949]: I0216 12:03:00.575707 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vm7xc\" (UniqueName: \"kubernetes.io/projected/6a13bbe6-0a94-4302-b276-5e643f747183-kube-api-access-vm7xc\") pod \"community-operators-f9r2s\" (UID: \"6a13bbe6-0a94-4302-b276-5e643f747183\") " pod="openshift-marketplace/community-operators-f9r2s" Feb 16 12:03:00 crc kubenswrapper[4949]: I0216 12:03:00.575740 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6a13bbe6-0a94-4302-b276-5e643f747183-catalog-content\") pod \"community-operators-f9r2s\" (UID: \"6a13bbe6-0a94-4302-b276-5e643f747183\") " pod="openshift-marketplace/community-operators-f9r2s" Feb 16 12:03:00 crc kubenswrapper[4949]: I0216 12:03:00.677651 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vm7xc\" (UniqueName: \"kubernetes.io/projected/6a13bbe6-0a94-4302-b276-5e643f747183-kube-api-access-vm7xc\") pod \"community-operators-f9r2s\" (UID: \"6a13bbe6-0a94-4302-b276-5e643f747183\") " pod="openshift-marketplace/community-operators-f9r2s" Feb 16 12:03:00 crc kubenswrapper[4949]: I0216 12:03:00.677720 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6a13bbe6-0a94-4302-b276-5e643f747183-catalog-content\") pod \"community-operators-f9r2s\" (UID: \"6a13bbe6-0a94-4302-b276-5e643f747183\") " pod="openshift-marketplace/community-operators-f9r2s" Feb 16 12:03:00 crc kubenswrapper[4949]: I0216 12:03:00.677834 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6a13bbe6-0a94-4302-b276-5e643f747183-utilities\") pod \"community-operators-f9r2s\" (UID: \"6a13bbe6-0a94-4302-b276-5e643f747183\") " pod="openshift-marketplace/community-operators-f9r2s" Feb 16 12:03:00 crc kubenswrapper[4949]: I0216 12:03:00.678327 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6a13bbe6-0a94-4302-b276-5e643f747183-catalog-content\") pod 
\"community-operators-f9r2s\" (UID: \"6a13bbe6-0a94-4302-b276-5e643f747183\") " pod="openshift-marketplace/community-operators-f9r2s" Feb 16 12:03:00 crc kubenswrapper[4949]: I0216 12:03:00.678421 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6a13bbe6-0a94-4302-b276-5e643f747183-utilities\") pod \"community-operators-f9r2s\" (UID: \"6a13bbe6-0a94-4302-b276-5e643f747183\") " pod="openshift-marketplace/community-operators-f9r2s" Feb 16 12:03:00 crc kubenswrapper[4949]: I0216 12:03:00.704925 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vm7xc\" (UniqueName: \"kubernetes.io/projected/6a13bbe6-0a94-4302-b276-5e643f747183-kube-api-access-vm7xc\") pod \"community-operators-f9r2s\" (UID: \"6a13bbe6-0a94-4302-b276-5e643f747183\") " pod="openshift-marketplace/community-operators-f9r2s" Feb 16 12:03:00 crc kubenswrapper[4949]: I0216 12:03:00.790536 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-f9r2s" Feb 16 12:03:01 crc kubenswrapper[4949]: I0216 12:03:01.314132 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-f9r2s"] Feb 16 12:03:01 crc kubenswrapper[4949]: W0216 12:03:01.318227 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6a13bbe6_0a94_4302_b276_5e643f747183.slice/crio-9a204385a5bbbe494a4b5a79e7776ef6d588b0a6c963ab39de4e20a7de19c74d WatchSource:0}: Error finding container 9a204385a5bbbe494a4b5a79e7776ef6d588b0a6c963ab39de4e20a7de19c74d: Status 404 returned error can't find the container with id 9a204385a5bbbe494a4b5a79e7776ef6d588b0a6c963ab39de4e20a7de19c74d Feb 16 12:03:01 crc kubenswrapper[4949]: I0216 12:03:01.773139 4949 generic.go:334] "Generic (PLEG): container finished" podID="6a13bbe6-0a94-4302-b276-5e643f747183" containerID="bd48897b22520b4593cc26e7e34b196279c3cd25264fbfdf747fe9d7c64b8030" exitCode=0 Feb 16 12:03:01 crc kubenswrapper[4949]: I0216 12:03:01.773234 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f9r2s" event={"ID":"6a13bbe6-0a94-4302-b276-5e643f747183","Type":"ContainerDied","Data":"bd48897b22520b4593cc26e7e34b196279c3cd25264fbfdf747fe9d7c64b8030"} Feb 16 12:03:01 crc kubenswrapper[4949]: I0216 12:03:01.773483 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f9r2s" event={"ID":"6a13bbe6-0a94-4302-b276-5e643f747183","Type":"ContainerStarted","Data":"9a204385a5bbbe494a4b5a79e7776ef6d588b0a6c963ab39de4e20a7de19c74d"} Feb 16 12:03:02 crc kubenswrapper[4949]: I0216 12:03:02.786220 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f9r2s" event={"ID":"6a13bbe6-0a94-4302-b276-5e643f747183","Type":"ContainerStarted","Data":"ee3de3f1ac4009bb6f051afbe88ef944fa4a14902c7bd089f8b77044f849b3ff"} Feb 16 12:03:03 crc kubenswrapper[4949]: I0216 12:03:03.796959 4949 generic.go:334] "Generic (PLEG): container finished" podID="6a13bbe6-0a94-4302-b276-5e643f747183" containerID="ee3de3f1ac4009bb6f051afbe88ef944fa4a14902c7bd089f8b77044f849b3ff" exitCode=0 Feb 16 12:03:03 crc kubenswrapper[4949]: I0216 12:03:03.797059 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f9r2s" 
event={"ID":"6a13bbe6-0a94-4302-b276-5e643f747183","Type":"ContainerDied","Data":"ee3de3f1ac4009bb6f051afbe88ef944fa4a14902c7bd089f8b77044f849b3ff"} Feb 16 12:03:04 crc kubenswrapper[4949]: I0216 12:03:04.815974 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f9r2s" event={"ID":"6a13bbe6-0a94-4302-b276-5e643f747183","Type":"ContainerStarted","Data":"5ca77ea64d8ca023e0d4bdb79f38b72f42a769cd0c696f445a769dca6758f085"} Feb 16 12:03:04 crc kubenswrapper[4949]: I0216 12:03:04.838022 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-f9r2s" podStartSLOduration=2.3901126599999998 podStartE2EDuration="4.837992498s" podCreationTimestamp="2026-02-16 12:03:00 +0000 UTC" firstStartedPulling="2026-02-16 12:03:01.774965687 +0000 UTC m=+3371.404299852" lastFinishedPulling="2026-02-16 12:03:04.222845525 +0000 UTC m=+3373.852179690" observedRunningTime="2026-02-16 12:03:04.83772647 +0000 UTC m=+3374.467060665" watchObservedRunningTime="2026-02-16 12:03:04.837992498 +0000 UTC m=+3374.467326703" Feb 16 12:03:06 crc kubenswrapper[4949]: E0216 12:03:06.236865 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:03:10 crc kubenswrapper[4949]: I0216 12:03:10.791452 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-f9r2s" Feb 16 12:03:10 crc kubenswrapper[4949]: I0216 12:03:10.793107 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-f9r2s" Feb 16 12:03:10 crc kubenswrapper[4949]: I0216 12:03:10.862504 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-f9r2s" Feb 16 12:03:10 crc kubenswrapper[4949]: I0216 12:03:10.947055 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-f9r2s" Feb 16 12:03:11 crc kubenswrapper[4949]: I0216 12:03:11.114847 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-f9r2s"] Feb 16 12:03:12 crc kubenswrapper[4949]: I0216 12:03:12.907058 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-f9r2s" podUID="6a13bbe6-0a94-4302-b276-5e643f747183" containerName="registry-server" containerID="cri-o://5ca77ea64d8ca023e0d4bdb79f38b72f42a769cd0c696f445a769dca6758f085" gracePeriod=2 Feb 16 12:03:13 crc kubenswrapper[4949]: E0216 12:03:13.236465 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:03:13 crc kubenswrapper[4949]: I0216 12:03:13.446810 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-f9r2s" Feb 16 12:03:13 crc kubenswrapper[4949]: I0216 12:03:13.518622 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vm7xc\" (UniqueName: \"kubernetes.io/projected/6a13bbe6-0a94-4302-b276-5e643f747183-kube-api-access-vm7xc\") pod \"6a13bbe6-0a94-4302-b276-5e643f747183\" (UID: \"6a13bbe6-0a94-4302-b276-5e643f747183\") " Feb 16 12:03:13 crc kubenswrapper[4949]: I0216 12:03:13.518742 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6a13bbe6-0a94-4302-b276-5e643f747183-catalog-content\") pod \"6a13bbe6-0a94-4302-b276-5e643f747183\" (UID: \"6a13bbe6-0a94-4302-b276-5e643f747183\") " Feb 16 12:03:13 crc kubenswrapper[4949]: I0216 12:03:13.518973 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6a13bbe6-0a94-4302-b276-5e643f747183-utilities\") pod \"6a13bbe6-0a94-4302-b276-5e643f747183\" (UID: \"6a13bbe6-0a94-4302-b276-5e643f747183\") " Feb 16 12:03:13 crc kubenswrapper[4949]: I0216 12:03:13.524990 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6a13bbe6-0a94-4302-b276-5e643f747183-utilities" (OuterVolumeSpecName: "utilities") pod "6a13bbe6-0a94-4302-b276-5e643f747183" (UID: "6a13bbe6-0a94-4302-b276-5e643f747183"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 12:03:13 crc kubenswrapper[4949]: I0216 12:03:13.526020 4949 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6a13bbe6-0a94-4302-b276-5e643f747183-utilities\") on node \"crc\" DevicePath \"\"" Feb 16 12:03:13 crc kubenswrapper[4949]: I0216 12:03:13.539446 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6a13bbe6-0a94-4302-b276-5e643f747183-kube-api-access-vm7xc" (OuterVolumeSpecName: "kube-api-access-vm7xc") pod "6a13bbe6-0a94-4302-b276-5e643f747183" (UID: "6a13bbe6-0a94-4302-b276-5e643f747183"). InnerVolumeSpecName "kube-api-access-vm7xc". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 12:03:13 crc kubenswrapper[4949]: I0216 12:03:13.636722 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vm7xc\" (UniqueName: \"kubernetes.io/projected/6a13bbe6-0a94-4302-b276-5e643f747183-kube-api-access-vm7xc\") on node \"crc\" DevicePath \"\"" Feb 16 12:03:13 crc kubenswrapper[4949]: I0216 12:03:13.654385 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6a13bbe6-0a94-4302-b276-5e643f747183-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6a13bbe6-0a94-4302-b276-5e643f747183" (UID: "6a13bbe6-0a94-4302-b276-5e643f747183"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 12:03:13 crc kubenswrapper[4949]: I0216 12:03:13.739807 4949 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6a13bbe6-0a94-4302-b276-5e643f747183-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 16 12:03:13 crc kubenswrapper[4949]: I0216 12:03:13.917949 4949 generic.go:334] "Generic (PLEG): container finished" podID="6a13bbe6-0a94-4302-b276-5e643f747183" containerID="5ca77ea64d8ca023e0d4bdb79f38b72f42a769cd0c696f445a769dca6758f085" exitCode=0 Feb 16 12:03:13 crc kubenswrapper[4949]: I0216 12:03:13.917992 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f9r2s" event={"ID":"6a13bbe6-0a94-4302-b276-5e643f747183","Type":"ContainerDied","Data":"5ca77ea64d8ca023e0d4bdb79f38b72f42a769cd0c696f445a769dca6758f085"} Feb 16 12:03:13 crc kubenswrapper[4949]: I0216 12:03:13.918020 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f9r2s" event={"ID":"6a13bbe6-0a94-4302-b276-5e643f747183","Type":"ContainerDied","Data":"9a204385a5bbbe494a4b5a79e7776ef6d588b0a6c963ab39de4e20a7de19c74d"} Feb 16 12:03:13 crc kubenswrapper[4949]: I0216 12:03:13.918039 4949 scope.go:117] "RemoveContainer" containerID="5ca77ea64d8ca023e0d4bdb79f38b72f42a769cd0c696f445a769dca6758f085" Feb 16 12:03:13 crc kubenswrapper[4949]: I0216 12:03:13.919056 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-f9r2s" Feb 16 12:03:13 crc kubenswrapper[4949]: I0216 12:03:13.947697 4949 scope.go:117] "RemoveContainer" containerID="ee3de3f1ac4009bb6f051afbe88ef944fa4a14902c7bd089f8b77044f849b3ff" Feb 16 12:03:13 crc kubenswrapper[4949]: I0216 12:03:13.957792 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-f9r2s"] Feb 16 12:03:13 crc kubenswrapper[4949]: I0216 12:03:13.968443 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-f9r2s"] Feb 16 12:03:13 crc kubenswrapper[4949]: I0216 12:03:13.973971 4949 scope.go:117] "RemoveContainer" containerID="bd48897b22520b4593cc26e7e34b196279c3cd25264fbfdf747fe9d7c64b8030" Feb 16 12:03:14 crc kubenswrapper[4949]: I0216 12:03:14.027091 4949 scope.go:117] "RemoveContainer" containerID="5ca77ea64d8ca023e0d4bdb79f38b72f42a769cd0c696f445a769dca6758f085" Feb 16 12:03:14 crc kubenswrapper[4949]: E0216 12:03:14.027633 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5ca77ea64d8ca023e0d4bdb79f38b72f42a769cd0c696f445a769dca6758f085\": container with ID starting with 5ca77ea64d8ca023e0d4bdb79f38b72f42a769cd0c696f445a769dca6758f085 not found: ID does not exist" containerID="5ca77ea64d8ca023e0d4bdb79f38b72f42a769cd0c696f445a769dca6758f085" Feb 16 12:03:14 crc kubenswrapper[4949]: I0216 12:03:14.027675 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5ca77ea64d8ca023e0d4bdb79f38b72f42a769cd0c696f445a769dca6758f085"} err="failed to get container status \"5ca77ea64d8ca023e0d4bdb79f38b72f42a769cd0c696f445a769dca6758f085\": rpc error: code = NotFound desc = could not find container \"5ca77ea64d8ca023e0d4bdb79f38b72f42a769cd0c696f445a769dca6758f085\": container with ID starting with 5ca77ea64d8ca023e0d4bdb79f38b72f42a769cd0c696f445a769dca6758f085 not found: ID does not exist" Feb 16 
12:03:14 crc kubenswrapper[4949]: I0216 12:03:14.027703 4949 scope.go:117] "RemoveContainer" containerID="ee3de3f1ac4009bb6f051afbe88ef944fa4a14902c7bd089f8b77044f849b3ff" Feb 16 12:03:14 crc kubenswrapper[4949]: E0216 12:03:14.028015 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ee3de3f1ac4009bb6f051afbe88ef944fa4a14902c7bd089f8b77044f849b3ff\": container with ID starting with ee3de3f1ac4009bb6f051afbe88ef944fa4a14902c7bd089f8b77044f849b3ff not found: ID does not exist" containerID="ee3de3f1ac4009bb6f051afbe88ef944fa4a14902c7bd089f8b77044f849b3ff" Feb 16 12:03:14 crc kubenswrapper[4949]: I0216 12:03:14.028048 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ee3de3f1ac4009bb6f051afbe88ef944fa4a14902c7bd089f8b77044f849b3ff"} err="failed to get container status \"ee3de3f1ac4009bb6f051afbe88ef944fa4a14902c7bd089f8b77044f849b3ff\": rpc error: code = NotFound desc = could not find container \"ee3de3f1ac4009bb6f051afbe88ef944fa4a14902c7bd089f8b77044f849b3ff\": container with ID starting with ee3de3f1ac4009bb6f051afbe88ef944fa4a14902c7bd089f8b77044f849b3ff not found: ID does not exist" Feb 16 12:03:14 crc kubenswrapper[4949]: I0216 12:03:14.028068 4949 scope.go:117] "RemoveContainer" containerID="bd48897b22520b4593cc26e7e34b196279c3cd25264fbfdf747fe9d7c64b8030" Feb 16 12:03:14 crc kubenswrapper[4949]: E0216 12:03:14.030807 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bd48897b22520b4593cc26e7e34b196279c3cd25264fbfdf747fe9d7c64b8030\": container with ID starting with bd48897b22520b4593cc26e7e34b196279c3cd25264fbfdf747fe9d7c64b8030 not found: ID does not exist" containerID="bd48897b22520b4593cc26e7e34b196279c3cd25264fbfdf747fe9d7c64b8030" Feb 16 12:03:14 crc kubenswrapper[4949]: I0216 12:03:14.031032 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bd48897b22520b4593cc26e7e34b196279c3cd25264fbfdf747fe9d7c64b8030"} err="failed to get container status \"bd48897b22520b4593cc26e7e34b196279c3cd25264fbfdf747fe9d7c64b8030\": rpc error: code = NotFound desc = could not find container \"bd48897b22520b4593cc26e7e34b196279c3cd25264fbfdf747fe9d7c64b8030\": container with ID starting with bd48897b22520b4593cc26e7e34b196279c3cd25264fbfdf747fe9d7c64b8030 not found: ID does not exist" Feb 16 12:03:15 crc kubenswrapper[4949]: I0216 12:03:15.251546 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6a13bbe6-0a94-4302-b276-5e643f747183" path="/var/lib/kubelet/pods/6a13bbe6-0a94-4302-b276-5e643f747183/volumes" Feb 16 12:03:20 crc kubenswrapper[4949]: E0216 12:03:20.238596 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:03:26 crc kubenswrapper[4949]: E0216 12:03:26.238755 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:03:33 crc 
kubenswrapper[4949]: E0216 12:03:33.238611 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 12:03:37 crc kubenswrapper[4949]: E0216 12:03:37.237982 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 12:03:47 crc kubenswrapper[4949]: E0216 12:03:47.237646 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 12:03:51 crc kubenswrapper[4949]: E0216 12:03:51.249157 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 12:03:59 crc kubenswrapper[4949]: E0216 12:03:59.238842 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 12:04:02 crc kubenswrapper[4949]: E0216 12:04:02.238424 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 12:04:13 crc kubenswrapper[4949]: E0216 12:04:13.238302 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 12:04:14 crc kubenswrapper[4949]: E0216 12:04:14.236851 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 12:04:27 crc kubenswrapper[4949]: E0216 12:04:27.238820 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 12:04:29 crc kubenswrapper[4949]: E0216 12:04:29.241462 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 12:04:34 crc kubenswrapper[4949]: I0216 12:04:34.550359 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Feb 16 12:04:34 crc kubenswrapper[4949]: I0216 12:04:34.551063 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Feb 16 12:04:39 crc kubenswrapper[4949]: E0216 12:04:39.238792 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 12:04:42 crc kubenswrapper[4949]: E0216 12:04:42.238697 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 12:04:53 crc kubenswrapper[4949]: E0216 12:04:53.237974 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 12:04:56 crc kubenswrapper[4949]: E0216 12:04:56.238108 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 12:05:04 crc kubenswrapper[4949]: I0216 12:05:04.549929 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Feb 16 12:05:04 crc kubenswrapper[4949]: I0216 12:05:04.550449 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Feb 16 12:05:06 crc kubenswrapper[4949]: E0216 12:05:06.237921 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 12:05:09 crc kubenswrapper[4949]: E0216 12:05:09.237883 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 12:05:18 crc kubenswrapper[4949]: E0216 12:05:18.238899 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 12:05:24 crc kubenswrapper[4949]: E0216 12:05:24.238875 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 12:05:31 crc kubenswrapper[4949]: E0216 12:05:31.245155 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 12:05:34 crc kubenswrapper[4949]: I0216 12:05:34.551017 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Feb 16 12:05:34 crc kubenswrapper[4949]: I0216 12:05:34.551623 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Feb 16 12:05:34 crc kubenswrapper[4949]: I0216 12:05:34.551678 4949 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-26lss"
Feb 16 12:05:34 crc kubenswrapper[4949]: I0216 12:05:34.552796 4949 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"196d4332bced584ab96d6e55bf867c81b178dde9f045c9e3a178bad980c7d4fa"} pod="openshift-machine-config-operator/machine-config-daemon-26lss" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Feb 16 12:05:34 crc kubenswrapper[4949]: I0216 12:05:34.552864 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" containerID="cri-o://196d4332bced584ab96d6e55bf867c81b178dde9f045c9e3a178bad980c7d4fa" gracePeriod=600
Feb 16 12:05:35 crc kubenswrapper[4949]: I0216 12:05:35.538122 4949 generic.go:334] "Generic (PLEG): container finished" podID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerID="196d4332bced584ab96d6e55bf867c81b178dde9f045c9e3a178bad980c7d4fa" exitCode=0
Feb 16 12:05:35 crc kubenswrapper[4949]: I0216 12:05:35.538202 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" event={"ID":"39ca5ab7-457c-4404-a3eb-f6acce74843b","Type":"ContainerDied","Data":"196d4332bced584ab96d6e55bf867c81b178dde9f045c9e3a178bad980c7d4fa"}
Feb 16 12:05:35 crc kubenswrapper[4949]: I0216 12:05:35.538457 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" event={"ID":"39ca5ab7-457c-4404-a3eb-f6acce74843b","Type":"ContainerStarted","Data":"c94c7ff085a1acabde918de5332cbd7f8a47e539952a8c199c48fa312cdccc2f"}
Feb 16 12:05:35 crc kubenswrapper[4949]: I0216 12:05:35.538482 4949 scope.go:117] "RemoveContainer" containerID="35cd40897a1d60a6cb43b2203374997bed301e6499f7eb175eceea009e21eff6"
Feb 16 12:05:39 crc kubenswrapper[4949]: E0216 12:05:39.238255 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 12:05:44 crc kubenswrapper[4949]: E0216 12:05:44.238352 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 12:05:53 crc kubenswrapper[4949]: E0216 12:05:53.239603 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 12:05:59 crc kubenswrapper[4949]: E0216 12:05:59.238154 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 12:06:07 crc kubenswrapper[4949]: E0216 12:06:07.239582 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
containerID="0954ddbd1f76cbc204485986054c5e07737f0ee159361a4b92e157d0ae2799f9" exitCode=2 Feb 16 12:06:09 crc kubenswrapper[4949]: I0216 12:06:09.938776 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gqs69" event={"ID":"9991be76-b16a-4afd-bcc6-05dc7dfe9da1","Type":"ContainerDied","Data":"0954ddbd1f76cbc204485986054c5e07737f0ee159361a4b92e157d0ae2799f9"} Feb 16 12:06:10 crc kubenswrapper[4949]: E0216 12:06:10.239103 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:06:11 crc kubenswrapper[4949]: I0216 12:06:11.502600 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gqs69" Feb 16 12:06:11 crc kubenswrapper[4949]: I0216 12:06:11.603516 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qvh8f\" (UniqueName: \"kubernetes.io/projected/9991be76-b16a-4afd-bcc6-05dc7dfe9da1-kube-api-access-qvh8f\") pod \"9991be76-b16a-4afd-bcc6-05dc7dfe9da1\" (UID: \"9991be76-b16a-4afd-bcc6-05dc7dfe9da1\") " Feb 16 12:06:11 crc kubenswrapper[4949]: I0216 12:06:11.603938 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/9991be76-b16a-4afd-bcc6-05dc7dfe9da1-ssh-key-openstack-edpm-ipam\") pod \"9991be76-b16a-4afd-bcc6-05dc7dfe9da1\" (UID: \"9991be76-b16a-4afd-bcc6-05dc7dfe9da1\") " Feb 16 12:06:11 crc kubenswrapper[4949]: I0216 12:06:11.604004 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9991be76-b16a-4afd-bcc6-05dc7dfe9da1-inventory\") pod \"9991be76-b16a-4afd-bcc6-05dc7dfe9da1\" (UID: \"9991be76-b16a-4afd-bcc6-05dc7dfe9da1\") " Feb 16 12:06:11 crc kubenswrapper[4949]: I0216 12:06:11.612015 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9991be76-b16a-4afd-bcc6-05dc7dfe9da1-kube-api-access-qvh8f" (OuterVolumeSpecName: "kube-api-access-qvh8f") pod "9991be76-b16a-4afd-bcc6-05dc7dfe9da1" (UID: "9991be76-b16a-4afd-bcc6-05dc7dfe9da1"). InnerVolumeSpecName "kube-api-access-qvh8f". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 12:06:11 crc kubenswrapper[4949]: I0216 12:06:11.641003 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9991be76-b16a-4afd-bcc6-05dc7dfe9da1-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "9991be76-b16a-4afd-bcc6-05dc7dfe9da1" (UID: "9991be76-b16a-4afd-bcc6-05dc7dfe9da1"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 12:06:11 crc kubenswrapper[4949]: I0216 12:06:11.651452 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9991be76-b16a-4afd-bcc6-05dc7dfe9da1-inventory" (OuterVolumeSpecName: "inventory") pod "9991be76-b16a-4afd-bcc6-05dc7dfe9da1" (UID: "9991be76-b16a-4afd-bcc6-05dc7dfe9da1"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 12:06:11 crc kubenswrapper[4949]: I0216 12:06:11.708556 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qvh8f\" (UniqueName: \"kubernetes.io/projected/9991be76-b16a-4afd-bcc6-05dc7dfe9da1-kube-api-access-qvh8f\") on node \"crc\" DevicePath \"\"" Feb 16 12:06:11 crc kubenswrapper[4949]: I0216 12:06:11.708610 4949 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/9991be76-b16a-4afd-bcc6-05dc7dfe9da1-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Feb 16 12:06:11 crc kubenswrapper[4949]: I0216 12:06:11.708625 4949 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9991be76-b16a-4afd-bcc6-05dc7dfe9da1-inventory\") on node \"crc\" DevicePath \"\"" Feb 16 12:06:11 crc kubenswrapper[4949]: I0216 12:06:11.962368 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gqs69" event={"ID":"9991be76-b16a-4afd-bcc6-05dc7dfe9da1","Type":"ContainerDied","Data":"02adfd99d962ebbcad3229e7d386472ce1e7e1a777cfb50867412bd19fc2b9f5"} Feb 16 12:06:11 crc kubenswrapper[4949]: I0216 12:06:11.962612 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="02adfd99d962ebbcad3229e7d386472ce1e7e1a777cfb50867412bd19fc2b9f5" Feb 16 12:06:11 crc kubenswrapper[4949]: I0216 12:06:11.962415 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gqs69" Feb 16 12:06:21 crc kubenswrapper[4949]: E0216 12:06:21.245562 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:06:23 crc kubenswrapper[4949]: E0216 12:06:23.237799 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:06:32 crc kubenswrapper[4949]: E0216 12:06:32.237462 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:06:36 crc kubenswrapper[4949]: I0216 12:06:36.243827 4949 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 16 12:06:36 crc kubenswrapper[4949]: E0216 12:06:36.365196 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
Feb 16 12:06:36 crc kubenswrapper[4949]: E0216 12:06:36.365196 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested"
Feb 16 12:06:36 crc kubenswrapper[4949]: E0216 12:06:36.365270 4949 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested"
Feb 16 12:06:36 crc kubenswrapper[4949]: E0216 12:06:36.365409 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ksbml,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-5lgds_openstack(a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError"
Feb 16 12:06:36 crc kubenswrapper[4949]: E0216 12:06:36.366658 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 12:06:46 crc kubenswrapper[4949]: E0216 12:06:46.350857 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested"
Feb 16 12:06:46 crc kubenswrapper[4949]: E0216 12:06:46.351560 4949 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested"
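[annotation] The root cause of the hour-long ImagePullBackOff loop surfaces here: the registry answers "Tag current-tested was deleted or has expired. To pull, revive via time machine", Quay's message for a tag that was removed or aged out by a tag-expiration policy. All the earlier back-off entries were retries of this permanently failing pull. A quick way to confirm, sketched against the standard Docker Registry HTTP API v2 tag-listing endpoint and assuming quay.rdoproject.org allows anonymous reads for this repository:

import json
import urllib.request

REPO = "podified-master-centos10/openstack-heat-engine"
url = f"https://quay.rdoproject.org/v2/{REPO}/tags/list"

# Standard registry v2 endpoint: {"name": ..., "tags": [...]}
with urllib.request.urlopen(url, timeout=10) as resp:
    tags = json.load(resp)["tags"]

print("current-tested" in tags)   # False here: the tag was deleted or expired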
Feb 16 12:06:46 crc kubenswrapper[4949]: E0216 12:06:46.351733 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n674h5dh7bh65bhcch65chc4h547h5d4h5c7h5dch5c8h74hb9h5f4hd8h79h7h59bh559h56bh9bhbch67bh68bh575h5cbh658h5bch7bhcch5d9q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8k7p7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(c69d7379-6f2b-45ae-8972-71e223a337a8): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError"
Feb 16 12:06:46 crc kubenswrapper[4949]: E0216 12:06:46.353143 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 12:06:48 crc kubenswrapper[4949]: E0216 12:06:48.239646 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 12:07:00 crc kubenswrapper[4949]: E0216 12:07:00.240237 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 12:07:02 crc kubenswrapper[4949]: E0216 12:07:02.238917 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 12:07:13 crc kubenswrapper[4949]: E0216 12:07:13.239518 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 12:07:14 crc kubenswrapper[4949]: E0216 12:07:14.238511 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 12:07:25 crc kubenswrapper[4949]: E0216 12:07:25.238419 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 12:07:27 crc kubenswrapper[4949]: E0216 12:07:27.237866 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 12:07:29 crc kubenswrapper[4949]: I0216 12:07:29.036018 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rgqdj"]
Feb 16 12:07:29 crc kubenswrapper[4949]: E0216 12:07:29.037448 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a13bbe6-0a94-4302-b276-5e643f747183" containerName="extract-utilities"
Feb 16 12:07:29 crc kubenswrapper[4949]: I0216 12:07:29.037469 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a13bbe6-0a94-4302-b276-5e643f747183" containerName="extract-utilities"
Feb 16 12:07:29 crc kubenswrapper[4949]: E0216 12:07:29.037494 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a13bbe6-0a94-4302-b276-5e643f747183" containerName="registry-server"
Feb 16 12:07:29 crc kubenswrapper[4949]: I0216 12:07:29.037506 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a13bbe6-0a94-4302-b276-5e643f747183" containerName="registry-server"
Feb 16 12:07:29 crc kubenswrapper[4949]: E0216 12:07:29.037521 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9991be76-b16a-4afd-bcc6-05dc7dfe9da1" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Feb 16 12:07:29 crc kubenswrapper[4949]: I0216 12:07:29.037531 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="9991be76-b16a-4afd-bcc6-05dc7dfe9da1" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Feb 16 12:07:29 crc kubenswrapper[4949]: E0216 12:07:29.037591 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a13bbe6-0a94-4302-b276-5e643f747183" containerName="extract-content"
Feb 16 12:07:29 crc kubenswrapper[4949]: I0216 12:07:29.037601 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a13bbe6-0a94-4302-b276-5e643f747183" containerName="extract-content"
Feb 16 12:07:29 crc kubenswrapper[4949]: I0216 12:07:29.037902 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="9991be76-b16a-4afd-bcc6-05dc7dfe9da1" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Feb 16 12:07:29 crc kubenswrapper[4949]: I0216 12:07:29.037922 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a13bbe6-0a94-4302-b276-5e643f747183" containerName="registry-server"
Feb 16 12:07:29 crc kubenswrapper[4949]: I0216 12:07:29.038839 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rgqdj"
Feb 16 12:07:29 crc kubenswrapper[4949]: I0216 12:07:29.041384 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Feb 16 12:07:29 crc kubenswrapper[4949]: I0216 12:07:29.041677 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-g89vn"
Feb 16 12:07:29 crc kubenswrapper[4949]: I0216 12:07:29.045872 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Feb 16 12:07:29 crc kubenswrapper[4949]: I0216 12:07:29.046093 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Feb 16 12:07:29 crc kubenswrapper[4949]: I0216 12:07:29.077614 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rgqdj"]
Feb 16 12:07:29 crc kubenswrapper[4949]: I0216 12:07:29.217464 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bfzj8\" (UniqueName: \"kubernetes.io/projected/83b6a7c1-807f-4f19-b519-75879c54d0c5-kube-api-access-bfzj8\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-rgqdj\" (UID: \"83b6a7c1-807f-4f19-b519-75879c54d0c5\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rgqdj"
Feb 16 12:07:29 crc kubenswrapper[4949]: I0216 12:07:29.217530 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/83b6a7c1-807f-4f19-b519-75879c54d0c5-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-rgqdj\" (UID: \"83b6a7c1-807f-4f19-b519-75879c54d0c5\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rgqdj"
Feb 16 12:07:29 crc kubenswrapper[4949]: I0216 12:07:29.217627 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/83b6a7c1-807f-4f19-b519-75879c54d0c5-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-rgqdj\" (UID: \"83b6a7c1-807f-4f19-b519-75879c54d0c5\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rgqdj"
Feb 16 12:07:29 crc kubenswrapper[4949]: I0216 12:07:29.319992 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bfzj8\" (UniqueName: \"kubernetes.io/projected/83b6a7c1-807f-4f19-b519-75879c54d0c5-kube-api-access-bfzj8\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-rgqdj\" (UID: \"83b6a7c1-807f-4f19-b519-75879c54d0c5\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rgqdj"
Feb 16 12:07:29 crc kubenswrapper[4949]: I0216 12:07:29.320099 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/83b6a7c1-807f-4f19-b519-75879c54d0c5-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-rgqdj\" (UID: \"83b6a7c1-807f-4f19-b519-75879c54d0c5\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rgqdj"
Feb 16 12:07:29 crc kubenswrapper[4949]: I0216 12:07:29.320184 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/83b6a7c1-807f-4f19-b519-75879c54d0c5-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-rgqdj\" (UID: \"83b6a7c1-807f-4f19-b519-75879c54d0c5\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rgqdj"
Feb 16 12:07:29 crc kubenswrapper[4949]: I0216 12:07:29.325864 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/83b6a7c1-807f-4f19-b519-75879c54d0c5-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-rgqdj\" (UID: \"83b6a7c1-807f-4f19-b519-75879c54d0c5\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rgqdj"
Feb 16 12:07:29 crc kubenswrapper[4949]: I0216 12:07:29.326140 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/83b6a7c1-807f-4f19-b519-75879c54d0c5-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-rgqdj\" (UID: \"83b6a7c1-807f-4f19-b519-75879c54d0c5\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rgqdj"
Feb 16 12:07:29 crc kubenswrapper[4949]: I0216 12:07:29.344484 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bfzj8\" (UniqueName: \"kubernetes.io/projected/83b6a7c1-807f-4f19-b519-75879c54d0c5-kube-api-access-bfzj8\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-rgqdj\" (UID: \"83b6a7c1-807f-4f19-b519-75879c54d0c5\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rgqdj"
Feb 16 12:07:29 crc kubenswrapper[4949]: I0216 12:07:29.378282 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rgqdj"
Feb 16 12:07:30 crc kubenswrapper[4949]: I0216 12:07:30.008948 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rgqdj"]
Feb 16 12:07:30 crc kubenswrapper[4949]: I0216 12:07:30.844920 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rgqdj" event={"ID":"83b6a7c1-807f-4f19-b519-75879c54d0c5","Type":"ContainerStarted","Data":"7eed7649a4e13d55185cadb72e9ee4199198b2ea8e504851124068cd070319fc"}
Feb 16 12:07:30 crc kubenswrapper[4949]: I0216 12:07:30.845418 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rgqdj" event={"ID":"83b6a7c1-807f-4f19-b519-75879c54d0c5","Type":"ContainerStarted","Data":"b47800ceee8e70d64b798ec52a195c98bb936820d87e1df2fd07dab951bda416"}
Feb 16 12:07:30 crc kubenswrapper[4949]: I0216 12:07:30.867681 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rgqdj" podStartSLOduration=1.303584608 podStartE2EDuration="1.867653745s" podCreationTimestamp="2026-02-16 12:07:29 +0000 UTC" firstStartedPulling="2026-02-16 12:07:30.01262661 +0000 UTC m=+3639.641960775" lastFinishedPulling="2026-02-16 12:07:30.576695747 +0000 UTC m=+3640.206029912" observedRunningTime="2026-02-16 12:07:30.862490647 +0000 UTC m=+3640.491824832" watchObservedRunningTime="2026-02-16 12:07:30.867653745 +0000 UTC m=+3640.496987920"
Feb 16 12:07:34 crc kubenswrapper[4949]: I0216 12:07:34.550689 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Feb 16 12:07:34 crc kubenswrapper[4949]: I0216 12:07:34.551358 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Feb 16 12:07:38 crc kubenswrapper[4949]: E0216 12:07:38.240137 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 12:07:40 crc kubenswrapper[4949]: E0216 12:07:40.239957 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 12:07:49 crc kubenswrapper[4949]: E0216 12:07:49.240481 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 12:07:54 crc kubenswrapper[4949]: E0216 12:07:54.240938 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 12:08:04 crc kubenswrapper[4949]: E0216 12:08:04.241436 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 12:08:04 crc kubenswrapper[4949]: I0216 12:08:04.550826 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Feb 16 12:08:04 crc kubenswrapper[4949]: I0216 12:08:04.550894 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Feb 16 12:08:09 crc kubenswrapper[4949]: E0216 12:08:09.242852 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 12:08:19 crc kubenswrapper[4949]: E0216 12:08:19.239531 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 12:08:23 crc kubenswrapper[4949]: E0216 12:08:23.237767 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 12:08:31 crc kubenswrapper[4949]: E0216 12:08:31.260669 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 12:08:34 crc kubenswrapper[4949]: I0216 12:08:34.550957 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Feb 16 12:08:34 crc kubenswrapper[4949]: I0216 12:08:34.551675 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Feb 16 12:08:34 crc kubenswrapper[4949]: I0216 12:08:34.551745 4949 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-26lss"
Feb 16 12:08:34 crc kubenswrapper[4949]: I0216 12:08:34.552837 4949 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c94c7ff085a1acabde918de5332cbd7f8a47e539952a8c199c48fa312cdccc2f"} pod="openshift-machine-config-operator/machine-config-daemon-26lss" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Feb 16 12:08:34 crc kubenswrapper[4949]: I0216 12:08:34.552927 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" containerID="cri-o://c94c7ff085a1acabde918de5332cbd7f8a47e539952a8c199c48fa312cdccc2f" gracePeriod=600
Feb 16 12:08:34 crc kubenswrapper[4949]: E0216 12:08:34.682066 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b"
Feb 16 12:08:35 crc kubenswrapper[4949]: I0216 12:08:35.598733 4949 generic.go:334] "Generic (PLEG): container finished" podID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerID="c94c7ff085a1acabde918de5332cbd7f8a47e539952a8c199c48fa312cdccc2f" exitCode=0
Feb 16 12:08:35 crc kubenswrapper[4949]: I0216 12:08:35.598875 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" event={"ID":"39ca5ab7-457c-4404-a3eb-f6acce74843b","Type":"ContainerDied","Data":"c94c7ff085a1acabde918de5332cbd7f8a47e539952a8c199c48fa312cdccc2f"}
Feb 16 12:08:35 crc kubenswrapper[4949]: I0216 12:08:35.599133 4949 scope.go:117] "RemoveContainer" containerID="196d4332bced584ab96d6e55bf867c81b178dde9f045c9e3a178bad980c7d4fa"
Feb 16 12:08:35 crc kubenswrapper[4949]: I0216 12:08:35.600207 4949 scope.go:117] "RemoveContainer" containerID="c94c7ff085a1acabde918de5332cbd7f8a47e539952a8c199c48fa312cdccc2f"
Feb 16 12:08:35 crc kubenswrapper[4949]: E0216 12:08:35.600818 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b"
Feb 16 12:08:38 crc kubenswrapper[4949]: E0216 12:08:38.239988 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 12:08:42 crc kubenswrapper[4949]: E0216 12:08:42.237354 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 12:08:48 crc kubenswrapper[4949]: I0216 12:08:48.236509 4949 scope.go:117] "RemoveContainer" containerID="c94c7ff085a1acabde918de5332cbd7f8a47e539952a8c199c48fa312cdccc2f"
Feb 16 12:08:48 crc kubenswrapper[4949]: E0216 12:08:48.237305 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b"
Feb 16 12:08:48 crc kubenswrapper[4949]: I0216 12:08:48.595122 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-vpmfx"]
Feb 16 12:08:48 crc kubenswrapper[4949]: I0216 12:08:48.598377 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vpmfx"
Feb 16 12:08:48 crc kubenswrapper[4949]: I0216 12:08:48.616982 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vpmfx"]
Feb 16 12:08:48 crc kubenswrapper[4949]: I0216 12:08:48.681966 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/952c90c3-f812-40d4-ba3e-7d339bcd4e17-utilities\") pod \"redhat-marketplace-vpmfx\" (UID: \"952c90c3-f812-40d4-ba3e-7d339bcd4e17\") " pod="openshift-marketplace/redhat-marketplace-vpmfx"
Feb 16 12:08:48 crc kubenswrapper[4949]: I0216 12:08:48.682136 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jccml\" (UniqueName: \"kubernetes.io/projected/952c90c3-f812-40d4-ba3e-7d339bcd4e17-kube-api-access-jccml\") pod \"redhat-marketplace-vpmfx\" (UID: \"952c90c3-f812-40d4-ba3e-7d339bcd4e17\") " pod="openshift-marketplace/redhat-marketplace-vpmfx"
Feb 16 12:08:48 crc kubenswrapper[4949]: I0216 12:08:48.682160 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/952c90c3-f812-40d4-ba3e-7d339bcd4e17-catalog-content\") pod \"redhat-marketplace-vpmfx\" (UID: \"952c90c3-f812-40d4-ba3e-7d339bcd4e17\") " pod="openshift-marketplace/redhat-marketplace-vpmfx"
Feb 16 12:08:48 crc kubenswrapper[4949]: I0216 12:08:48.783930 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/952c90c3-f812-40d4-ba3e-7d339bcd4e17-utilities\") pod \"redhat-marketplace-vpmfx\" (UID: \"952c90c3-f812-40d4-ba3e-7d339bcd4e17\") " pod="openshift-marketplace/redhat-marketplace-vpmfx"
Feb 16 12:08:48 crc kubenswrapper[4949]: I0216 12:08:48.784071 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jccml\" (UniqueName: \"kubernetes.io/projected/952c90c3-f812-40d4-ba3e-7d339bcd4e17-kube-api-access-jccml\") pod \"redhat-marketplace-vpmfx\" (UID: \"952c90c3-f812-40d4-ba3e-7d339bcd4e17\") " pod="openshift-marketplace/redhat-marketplace-vpmfx"
Feb 16 12:08:48 crc kubenswrapper[4949]: I0216 12:08:48.784098 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/952c90c3-f812-40d4-ba3e-7d339bcd4e17-catalog-content\") pod \"redhat-marketplace-vpmfx\" (UID: \"952c90c3-f812-40d4-ba3e-7d339bcd4e17\") " pod="openshift-marketplace/redhat-marketplace-vpmfx"
Feb 16 12:08:48 crc kubenswrapper[4949]: I0216 12:08:48.784605 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/952c90c3-f812-40d4-ba3e-7d339bcd4e17-utilities\") pod \"redhat-marketplace-vpmfx\" (UID: \"952c90c3-f812-40d4-ba3e-7d339bcd4e17\") " pod="openshift-marketplace/redhat-marketplace-vpmfx"
Feb 16 12:08:48 crc kubenswrapper[4949]: I0216 12:08:48.784829 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/952c90c3-f812-40d4-ba3e-7d339bcd4e17-catalog-content\") pod \"redhat-marketplace-vpmfx\" (UID: \"952c90c3-f812-40d4-ba3e-7d339bcd4e17\") " pod="openshift-marketplace/redhat-marketplace-vpmfx"
Feb 16 12:08:48 crc kubenswrapper[4949]: I0216 12:08:48.809954 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jccml\" (UniqueName: \"kubernetes.io/projected/952c90c3-f812-40d4-ba3e-7d339bcd4e17-kube-api-access-jccml\") pod \"redhat-marketplace-vpmfx\" (UID: \"952c90c3-f812-40d4-ba3e-7d339bcd4e17\") " pod="openshift-marketplace/redhat-marketplace-vpmfx"
Feb 16 12:08:48 crc kubenswrapper[4949]: I0216 12:08:48.933148 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vpmfx"
Feb 16 12:08:49 crc kubenswrapper[4949]: E0216 12:08:49.249632 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 12:08:49 crc kubenswrapper[4949]: I0216 12:08:49.292496 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vpmfx"]
Feb 16 12:08:49 crc kubenswrapper[4949]: I0216 12:08:49.751625 4949 generic.go:334] "Generic (PLEG): container finished" podID="952c90c3-f812-40d4-ba3e-7d339bcd4e17" containerID="c35c2a13f70f65d8aeb952e678f71e006a1f97eecf11027591ec78e6b149ccf5" exitCode=0
Feb 16 12:08:49 crc kubenswrapper[4949]: I0216 12:08:49.751706 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vpmfx" event={"ID":"952c90c3-f812-40d4-ba3e-7d339bcd4e17","Type":"ContainerDied","Data":"c35c2a13f70f65d8aeb952e678f71e006a1f97eecf11027591ec78e6b149ccf5"}
Feb 16 12:08:49 crc kubenswrapper[4949]: I0216 12:08:49.752327 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vpmfx" event={"ID":"952c90c3-f812-40d4-ba3e-7d339bcd4e17","Type":"ContainerStarted","Data":"d5a3a9faa68226eddf7a1a4a33ead33ac2d2178f87b49a173834dd56f226e7b0"}
Feb 16 12:08:51 crc kubenswrapper[4949]: I0216 12:08:51.777102 4949 generic.go:334] "Generic (PLEG): container finished" podID="952c90c3-f812-40d4-ba3e-7d339bcd4e17" containerID="a7f9c0572f1265a10c800227d350b80ae52fd3395f4229de89817eecdc6f7bca" exitCode=0
Feb 16 12:08:51 crc kubenswrapper[4949]: I0216 12:08:51.777157 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vpmfx" event={"ID":"952c90c3-f812-40d4-ba3e-7d339bcd4e17","Type":"ContainerDied","Data":"a7f9c0572f1265a10c800227d350b80ae52fd3395f4229de89817eecdc6f7bca"}
Feb 16 12:08:52 crc kubenswrapper[4949]: I0216 12:08:52.791760 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vpmfx" event={"ID":"952c90c3-f812-40d4-ba3e-7d339bcd4e17","Type":"ContainerStarted","Data":"48261db9ea614b8c9a010bac94e67f5252f6be89543dc87a2d4f7f7a20fdba81"}
Feb 16 12:08:52 crc kubenswrapper[4949]: I0216 12:08:52.815307 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-vpmfx" podStartSLOduration=2.34705765 podStartE2EDuration="4.81528634s" podCreationTimestamp="2026-02-16 12:08:48 +0000 UTC" firstStartedPulling="2026-02-16 12:08:49.753593415 +0000 UTC m=+3719.382927590" lastFinishedPulling="2026-02-16 12:08:52.221822105 +0000 UTC m=+3721.851156280" observedRunningTime="2026-02-16 12:08:52.812316375 +0000 UTC m=+3722.441650550" watchObservedRunningTime="2026-02-16 12:08:52.81528634 +0000 UTC m=+3722.444620505"
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:08:58 crc kubenswrapper[4949]: I0216 12:08:58.933924 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-vpmfx" Feb 16 12:08:58 crc kubenswrapper[4949]: I0216 12:08:58.934442 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-vpmfx" Feb 16 12:08:59 crc kubenswrapper[4949]: I0216 12:08:59.002615 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-vpmfx" Feb 16 12:08:59 crc kubenswrapper[4949]: I0216 12:08:59.941945 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-vpmfx" Feb 16 12:08:59 crc kubenswrapper[4949]: I0216 12:08:59.994773 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vpmfx"] Feb 16 12:09:01 crc kubenswrapper[4949]: I0216 12:09:01.247819 4949 scope.go:117] "RemoveContainer" containerID="c94c7ff085a1acabde918de5332cbd7f8a47e539952a8c199c48fa312cdccc2f" Feb 16 12:09:01 crc kubenswrapper[4949]: E0216 12:09:01.249525 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:09:01 crc kubenswrapper[4949]: E0216 12:09:01.251230 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:09:01 crc kubenswrapper[4949]: I0216 12:09:01.880877 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-vpmfx" podUID="952c90c3-f812-40d4-ba3e-7d339bcd4e17" containerName="registry-server" containerID="cri-o://48261db9ea614b8c9a010bac94e67f5252f6be89543dc87a2d4f7f7a20fdba81" gracePeriod=2 Feb 16 12:09:02 crc kubenswrapper[4949]: I0216 12:09:02.426358 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vpmfx" Feb 16 12:09:02 crc kubenswrapper[4949]: I0216 12:09:02.460512 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/952c90c3-f812-40d4-ba3e-7d339bcd4e17-catalog-content\") pod \"952c90c3-f812-40d4-ba3e-7d339bcd4e17\" (UID: \"952c90c3-f812-40d4-ba3e-7d339bcd4e17\") " Feb 16 12:09:02 crc kubenswrapper[4949]: I0216 12:09:02.461017 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/952c90c3-f812-40d4-ba3e-7d339bcd4e17-utilities\") pod \"952c90c3-f812-40d4-ba3e-7d339bcd4e17\" (UID: \"952c90c3-f812-40d4-ba3e-7d339bcd4e17\") " Feb 16 12:09:02 crc kubenswrapper[4949]: I0216 12:09:02.461097 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jccml\" (UniqueName: \"kubernetes.io/projected/952c90c3-f812-40d4-ba3e-7d339bcd4e17-kube-api-access-jccml\") pod \"952c90c3-f812-40d4-ba3e-7d339bcd4e17\" (UID: \"952c90c3-f812-40d4-ba3e-7d339bcd4e17\") " Feb 16 12:09:02 crc kubenswrapper[4949]: I0216 12:09:02.463196 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/952c90c3-f812-40d4-ba3e-7d339bcd4e17-utilities" (OuterVolumeSpecName: "utilities") pod "952c90c3-f812-40d4-ba3e-7d339bcd4e17" (UID: "952c90c3-f812-40d4-ba3e-7d339bcd4e17"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 12:09:02 crc kubenswrapper[4949]: I0216 12:09:02.476721 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/952c90c3-f812-40d4-ba3e-7d339bcd4e17-kube-api-access-jccml" (OuterVolumeSpecName: "kube-api-access-jccml") pod "952c90c3-f812-40d4-ba3e-7d339bcd4e17" (UID: "952c90c3-f812-40d4-ba3e-7d339bcd4e17"). InnerVolumeSpecName "kube-api-access-jccml". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 12:09:02 crc kubenswrapper[4949]: I0216 12:09:02.521060 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/952c90c3-f812-40d4-ba3e-7d339bcd4e17-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "952c90c3-f812-40d4-ba3e-7d339bcd4e17" (UID: "952c90c3-f812-40d4-ba3e-7d339bcd4e17"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 12:09:02 crc kubenswrapper[4949]: I0216 12:09:02.564987 4949 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/952c90c3-f812-40d4-ba3e-7d339bcd4e17-utilities\") on node \"crc\" DevicePath \"\"" Feb 16 12:09:02 crc kubenswrapper[4949]: I0216 12:09:02.565029 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jccml\" (UniqueName: \"kubernetes.io/projected/952c90c3-f812-40d4-ba3e-7d339bcd4e17-kube-api-access-jccml\") on node \"crc\" DevicePath \"\"" Feb 16 12:09:02 crc kubenswrapper[4949]: I0216 12:09:02.565042 4949 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/952c90c3-f812-40d4-ba3e-7d339bcd4e17-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 16 12:09:02 crc kubenswrapper[4949]: I0216 12:09:02.893128 4949 generic.go:334] "Generic (PLEG): container finished" podID="952c90c3-f812-40d4-ba3e-7d339bcd4e17" containerID="48261db9ea614b8c9a010bac94e67f5252f6be89543dc87a2d4f7f7a20fdba81" exitCode=0 Feb 16 12:09:02 crc kubenswrapper[4949]: I0216 12:09:02.893197 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vpmfx" event={"ID":"952c90c3-f812-40d4-ba3e-7d339bcd4e17","Type":"ContainerDied","Data":"48261db9ea614b8c9a010bac94e67f5252f6be89543dc87a2d4f7f7a20fdba81"} Feb 16 12:09:02 crc kubenswrapper[4949]: I0216 12:09:02.893232 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vpmfx" event={"ID":"952c90c3-f812-40d4-ba3e-7d339bcd4e17","Type":"ContainerDied","Data":"d5a3a9faa68226eddf7a1a4a33ead33ac2d2178f87b49a173834dd56f226e7b0"} Feb 16 12:09:02 crc kubenswrapper[4949]: I0216 12:09:02.893253 4949 scope.go:117] "RemoveContainer" containerID="48261db9ea614b8c9a010bac94e67f5252f6be89543dc87a2d4f7f7a20fdba81" Feb 16 12:09:02 crc kubenswrapper[4949]: I0216 12:09:02.893472 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vpmfx" Feb 16 12:09:02 crc kubenswrapper[4949]: I0216 12:09:02.921629 4949 scope.go:117] "RemoveContainer" containerID="a7f9c0572f1265a10c800227d350b80ae52fd3395f4229de89817eecdc6f7bca" Feb 16 12:09:02 crc kubenswrapper[4949]: I0216 12:09:02.951792 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vpmfx"] Feb 16 12:09:02 crc kubenswrapper[4949]: I0216 12:09:02.982656 4949 scope.go:117] "RemoveContainer" containerID="c35c2a13f70f65d8aeb952e678f71e006a1f97eecf11027591ec78e6b149ccf5" Feb 16 12:09:02 crc kubenswrapper[4949]: I0216 12:09:02.991844 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-vpmfx"] Feb 16 12:09:03 crc kubenswrapper[4949]: I0216 12:09:03.049441 4949 scope.go:117] "RemoveContainer" containerID="48261db9ea614b8c9a010bac94e67f5252f6be89543dc87a2d4f7f7a20fdba81" Feb 16 12:09:03 crc kubenswrapper[4949]: E0216 12:09:03.052678 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"48261db9ea614b8c9a010bac94e67f5252f6be89543dc87a2d4f7f7a20fdba81\": container with ID starting with 48261db9ea614b8c9a010bac94e67f5252f6be89543dc87a2d4f7f7a20fdba81 not found: ID does not exist" containerID="48261db9ea614b8c9a010bac94e67f5252f6be89543dc87a2d4f7f7a20fdba81" Feb 16 12:09:03 crc kubenswrapper[4949]: I0216 12:09:03.052726 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"48261db9ea614b8c9a010bac94e67f5252f6be89543dc87a2d4f7f7a20fdba81"} err="failed to get container status \"48261db9ea614b8c9a010bac94e67f5252f6be89543dc87a2d4f7f7a20fdba81\": rpc error: code = NotFound desc = could not find container \"48261db9ea614b8c9a010bac94e67f5252f6be89543dc87a2d4f7f7a20fdba81\": container with ID starting with 48261db9ea614b8c9a010bac94e67f5252f6be89543dc87a2d4f7f7a20fdba81 not found: ID does not exist" Feb 16 12:09:03 crc kubenswrapper[4949]: I0216 12:09:03.052758 4949 scope.go:117] "RemoveContainer" containerID="a7f9c0572f1265a10c800227d350b80ae52fd3395f4229de89817eecdc6f7bca" Feb 16 12:09:03 crc kubenswrapper[4949]: E0216 12:09:03.053210 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a7f9c0572f1265a10c800227d350b80ae52fd3395f4229de89817eecdc6f7bca\": container with ID starting with a7f9c0572f1265a10c800227d350b80ae52fd3395f4229de89817eecdc6f7bca not found: ID does not exist" containerID="a7f9c0572f1265a10c800227d350b80ae52fd3395f4229de89817eecdc6f7bca" Feb 16 12:09:03 crc kubenswrapper[4949]: I0216 12:09:03.053261 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a7f9c0572f1265a10c800227d350b80ae52fd3395f4229de89817eecdc6f7bca"} err="failed to get container status \"a7f9c0572f1265a10c800227d350b80ae52fd3395f4229de89817eecdc6f7bca\": rpc error: code = NotFound desc = could not find container \"a7f9c0572f1265a10c800227d350b80ae52fd3395f4229de89817eecdc6f7bca\": container with ID starting with a7f9c0572f1265a10c800227d350b80ae52fd3395f4229de89817eecdc6f7bca not found: ID does not exist" Feb 16 12:09:03 crc kubenswrapper[4949]: I0216 12:09:03.053289 4949 scope.go:117] "RemoveContainer" containerID="c35c2a13f70f65d8aeb952e678f71e006a1f97eecf11027591ec78e6b149ccf5" Feb 16 12:09:03 crc kubenswrapper[4949]: E0216 12:09:03.053592 4949 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"c35c2a13f70f65d8aeb952e678f71e006a1f97eecf11027591ec78e6b149ccf5\": container with ID starting with c35c2a13f70f65d8aeb952e678f71e006a1f97eecf11027591ec78e6b149ccf5 not found: ID does not exist" containerID="c35c2a13f70f65d8aeb952e678f71e006a1f97eecf11027591ec78e6b149ccf5" Feb 16 12:09:03 crc kubenswrapper[4949]: I0216 12:09:03.053622 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c35c2a13f70f65d8aeb952e678f71e006a1f97eecf11027591ec78e6b149ccf5"} err="failed to get container status \"c35c2a13f70f65d8aeb952e678f71e006a1f97eecf11027591ec78e6b149ccf5\": rpc error: code = NotFound desc = could not find container \"c35c2a13f70f65d8aeb952e678f71e006a1f97eecf11027591ec78e6b149ccf5\": container with ID starting with c35c2a13f70f65d8aeb952e678f71e006a1f97eecf11027591ec78e6b149ccf5 not found: ID does not exist" Feb 16 12:09:03 crc kubenswrapper[4949]: I0216 12:09:03.252328 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="952c90c3-f812-40d4-ba3e-7d339bcd4e17" path="/var/lib/kubelet/pods/952c90c3-f812-40d4-ba3e-7d339bcd4e17/volumes" Feb 16 12:09:09 crc kubenswrapper[4949]: E0216 12:09:09.241568 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:09:12 crc kubenswrapper[4949]: E0216 12:09:12.238942 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:09:15 crc kubenswrapper[4949]: I0216 12:09:15.236527 4949 scope.go:117] "RemoveContainer" containerID="c94c7ff085a1acabde918de5332cbd7f8a47e539952a8c199c48fa312cdccc2f" Feb 16 12:09:15 crc kubenswrapper[4949]: E0216 12:09:15.237498 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:09:22 crc kubenswrapper[4949]: E0216 12:09:22.239544 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:09:23 crc kubenswrapper[4949]: E0216 12:09:23.237544 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:09:30 crc kubenswrapper[4949]: I0216 12:09:30.235776 4949 scope.go:117] 
"RemoveContainer" containerID="c94c7ff085a1acabde918de5332cbd7f8a47e539952a8c199c48fa312cdccc2f" Feb 16 12:09:30 crc kubenswrapper[4949]: E0216 12:09:30.237320 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:09:34 crc kubenswrapper[4949]: E0216 12:09:34.237535 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:09:36 crc kubenswrapper[4949]: E0216 12:09:36.237667 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:09:41 crc kubenswrapper[4949]: I0216 12:09:41.246716 4949 scope.go:117] "RemoveContainer" containerID="c94c7ff085a1acabde918de5332cbd7f8a47e539952a8c199c48fa312cdccc2f" Feb 16 12:09:41 crc kubenswrapper[4949]: E0216 12:09:41.247859 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:09:45 crc kubenswrapper[4949]: E0216 12:09:45.247507 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:09:48 crc kubenswrapper[4949]: E0216 12:09:48.238305 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:09:53 crc kubenswrapper[4949]: I0216 12:09:53.235739 4949 scope.go:117] "RemoveContainer" containerID="c94c7ff085a1acabde918de5332cbd7f8a47e539952a8c199c48fa312cdccc2f" Feb 16 12:09:53 crc kubenswrapper[4949]: E0216 12:09:53.236675 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 
12:09:59 crc kubenswrapper[4949]: E0216 12:09:59.240901 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:10:00 crc kubenswrapper[4949]: E0216 12:10:00.239135 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:10:04 crc kubenswrapper[4949]: I0216 12:10:04.236903 4949 scope.go:117] "RemoveContainer" containerID="c94c7ff085a1acabde918de5332cbd7f8a47e539952a8c199c48fa312cdccc2f" Feb 16 12:10:04 crc kubenswrapper[4949]: E0216 12:10:04.238590 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:10:10 crc kubenswrapper[4949]: E0216 12:10:10.237754 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:10:15 crc kubenswrapper[4949]: E0216 12:10:15.240413 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:10:16 crc kubenswrapper[4949]: I0216 12:10:16.236498 4949 scope.go:117] "RemoveContainer" containerID="c94c7ff085a1acabde918de5332cbd7f8a47e539952a8c199c48fa312cdccc2f" Feb 16 12:10:16 crc kubenswrapper[4949]: E0216 12:10:16.237463 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:10:24 crc kubenswrapper[4949]: E0216 12:10:24.238261 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:10:26 crc kubenswrapper[4949]: E0216 12:10:26.238478 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: 
\"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:10:31 crc kubenswrapper[4949]: I0216 12:10:31.248566 4949 scope.go:117] "RemoveContainer" containerID="c94c7ff085a1acabde918de5332cbd7f8a47e539952a8c199c48fa312cdccc2f" Feb 16 12:10:31 crc kubenswrapper[4949]: E0216 12:10:31.249623 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:10:37 crc kubenswrapper[4949]: E0216 12:10:37.240504 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:10:38 crc kubenswrapper[4949]: E0216 12:10:38.237443 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:10:44 crc kubenswrapper[4949]: I0216 12:10:44.236288 4949 scope.go:117] "RemoveContainer" containerID="c94c7ff085a1acabde918de5332cbd7f8a47e539952a8c199c48fa312cdccc2f" Feb 16 12:10:44 crc kubenswrapper[4949]: E0216 12:10:44.237229 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:10:48 crc kubenswrapper[4949]: E0216 12:10:48.238148 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:10:52 crc kubenswrapper[4949]: E0216 12:10:52.239044 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:10:57 crc kubenswrapper[4949]: I0216 12:10:57.235719 4949 scope.go:117] "RemoveContainer" containerID="c94c7ff085a1acabde918de5332cbd7f8a47e539952a8c199c48fa312cdccc2f" Feb 16 12:10:57 crc kubenswrapper[4949]: E0216 12:10:57.237689 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s 
restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:10:59 crc kubenswrapper[4949]: E0216 12:10:59.239463 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:11:06 crc kubenswrapper[4949]: I0216 12:11:06.883082 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-h9hfp"] Feb 16 12:11:06 crc kubenswrapper[4949]: E0216 12:11:06.885952 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="952c90c3-f812-40d4-ba3e-7d339bcd4e17" containerName="extract-content" Feb 16 12:11:06 crc kubenswrapper[4949]: I0216 12:11:06.886126 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="952c90c3-f812-40d4-ba3e-7d339bcd4e17" containerName="extract-content" Feb 16 12:11:06 crc kubenswrapper[4949]: E0216 12:11:06.886380 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="952c90c3-f812-40d4-ba3e-7d339bcd4e17" containerName="extract-utilities" Feb 16 12:11:06 crc kubenswrapper[4949]: I0216 12:11:06.886552 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="952c90c3-f812-40d4-ba3e-7d339bcd4e17" containerName="extract-utilities" Feb 16 12:11:06 crc kubenswrapper[4949]: E0216 12:11:06.886733 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="952c90c3-f812-40d4-ba3e-7d339bcd4e17" containerName="registry-server" Feb 16 12:11:06 crc kubenswrapper[4949]: I0216 12:11:06.887041 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="952c90c3-f812-40d4-ba3e-7d339bcd4e17" containerName="registry-server" Feb 16 12:11:06 crc kubenswrapper[4949]: I0216 12:11:06.887631 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="952c90c3-f812-40d4-ba3e-7d339bcd4e17" containerName="registry-server" Feb 16 12:11:06 crc kubenswrapper[4949]: I0216 12:11:06.891819 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-h9hfp" Feb 16 12:11:06 crc kubenswrapper[4949]: I0216 12:11:06.896784 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-h9hfp"] Feb 16 12:11:07 crc kubenswrapper[4949]: I0216 12:11:07.034839 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b966c642-ee42-40af-8341-847b728ccb3d-catalog-content\") pod \"certified-operators-h9hfp\" (UID: \"b966c642-ee42-40af-8341-847b728ccb3d\") " pod="openshift-marketplace/certified-operators-h9hfp" Feb 16 12:11:07 crc kubenswrapper[4949]: I0216 12:11:07.035301 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b966c642-ee42-40af-8341-847b728ccb3d-utilities\") pod \"certified-operators-h9hfp\" (UID: \"b966c642-ee42-40af-8341-847b728ccb3d\") " pod="openshift-marketplace/certified-operators-h9hfp" Feb 16 12:11:07 crc kubenswrapper[4949]: I0216 12:11:07.035647 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7n5qb\" (UniqueName: \"kubernetes.io/projected/b966c642-ee42-40af-8341-847b728ccb3d-kube-api-access-7n5qb\") pod \"certified-operators-h9hfp\" (UID: \"b966c642-ee42-40af-8341-847b728ccb3d\") " pod="openshift-marketplace/certified-operators-h9hfp" Feb 16 12:11:07 crc kubenswrapper[4949]: I0216 12:11:07.138375 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b966c642-ee42-40af-8341-847b728ccb3d-utilities\") pod \"certified-operators-h9hfp\" (UID: \"b966c642-ee42-40af-8341-847b728ccb3d\") " pod="openshift-marketplace/certified-operators-h9hfp" Feb 16 12:11:07 crc kubenswrapper[4949]: I0216 12:11:07.138850 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7n5qb\" (UniqueName: \"kubernetes.io/projected/b966c642-ee42-40af-8341-847b728ccb3d-kube-api-access-7n5qb\") pod \"certified-operators-h9hfp\" (UID: \"b966c642-ee42-40af-8341-847b728ccb3d\") " pod="openshift-marketplace/certified-operators-h9hfp" Feb 16 12:11:07 crc kubenswrapper[4949]: I0216 12:11:07.139020 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b966c642-ee42-40af-8341-847b728ccb3d-utilities\") pod \"certified-operators-h9hfp\" (UID: \"b966c642-ee42-40af-8341-847b728ccb3d\") " pod="openshift-marketplace/certified-operators-h9hfp" Feb 16 12:11:07 crc kubenswrapper[4949]: I0216 12:11:07.139249 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b966c642-ee42-40af-8341-847b728ccb3d-catalog-content\") pod \"certified-operators-h9hfp\" (UID: \"b966c642-ee42-40af-8341-847b728ccb3d\") " pod="openshift-marketplace/certified-operators-h9hfp" Feb 16 12:11:07 crc kubenswrapper[4949]: I0216 12:11:07.139630 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b966c642-ee42-40af-8341-847b728ccb3d-catalog-content\") pod \"certified-operators-h9hfp\" (UID: \"b966c642-ee42-40af-8341-847b728ccb3d\") " pod="openshift-marketplace/certified-operators-h9hfp" Feb 16 12:11:07 crc kubenswrapper[4949]: I0216 12:11:07.163655 4949 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-7n5qb\" (UniqueName: \"kubernetes.io/projected/b966c642-ee42-40af-8341-847b728ccb3d-kube-api-access-7n5qb\") pod \"certified-operators-h9hfp\" (UID: \"b966c642-ee42-40af-8341-847b728ccb3d\") " pod="openshift-marketplace/certified-operators-h9hfp" Feb 16 12:11:07 crc kubenswrapper[4949]: E0216 12:11:07.245325 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:11:07 crc kubenswrapper[4949]: I0216 12:11:07.246657 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-h9hfp" Feb 16 12:11:07 crc kubenswrapper[4949]: I0216 12:11:07.897449 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-h9hfp"] Feb 16 12:11:08 crc kubenswrapper[4949]: I0216 12:11:08.311249 4949 generic.go:334] "Generic (PLEG): container finished" podID="b966c642-ee42-40af-8341-847b728ccb3d" containerID="9e51a62f46ee5a8b8cc36143733624fd0d9401ebdcdeeb35206eb465978fcef4" exitCode=0 Feb 16 12:11:08 crc kubenswrapper[4949]: I0216 12:11:08.311316 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-h9hfp" event={"ID":"b966c642-ee42-40af-8341-847b728ccb3d","Type":"ContainerDied","Data":"9e51a62f46ee5a8b8cc36143733624fd0d9401ebdcdeeb35206eb465978fcef4"} Feb 16 12:11:08 crc kubenswrapper[4949]: I0216 12:11:08.311611 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-h9hfp" event={"ID":"b966c642-ee42-40af-8341-847b728ccb3d","Type":"ContainerStarted","Data":"7b16b8968cd2fc899b8263c52f3335dfe2de175a22fe95dbc1607854151d27a1"} Feb 16 12:11:10 crc kubenswrapper[4949]: I0216 12:11:10.344722 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-h9hfp" event={"ID":"b966c642-ee42-40af-8341-847b728ccb3d","Type":"ContainerStarted","Data":"ff565823562c3ce74a8f06c54f4623873492cd39e6b2f379f616309ae1f7739f"} Feb 16 12:11:11 crc kubenswrapper[4949]: I0216 12:11:11.252267 4949 scope.go:117] "RemoveContainer" containerID="c94c7ff085a1acabde918de5332cbd7f8a47e539952a8c199c48fa312cdccc2f" Feb 16 12:11:11 crc kubenswrapper[4949]: E0216 12:11:11.253807 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:11:11 crc kubenswrapper[4949]: I0216 12:11:11.355923 4949 generic.go:334] "Generic (PLEG): container finished" podID="b966c642-ee42-40af-8341-847b728ccb3d" containerID="ff565823562c3ce74a8f06c54f4623873492cd39e6b2f379f616309ae1f7739f" exitCode=0 Feb 16 12:11:11 crc kubenswrapper[4949]: I0216 12:11:11.355967 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-h9hfp" event={"ID":"b966c642-ee42-40af-8341-847b728ccb3d","Type":"ContainerDied","Data":"ff565823562c3ce74a8f06c54f4623873492cd39e6b2f379f616309ae1f7739f"} Feb 16 
12:11:12 crc kubenswrapper[4949]: E0216 12:11:12.237754 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:11:12 crc kubenswrapper[4949]: I0216 12:11:12.374415 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-h9hfp" event={"ID":"b966c642-ee42-40af-8341-847b728ccb3d","Type":"ContainerStarted","Data":"f1c41c90161d6e75eafb7f44f33699f95fc7e94bda9fa9e58cb76f2d9c101719"} Feb 16 12:11:12 crc kubenswrapper[4949]: I0216 12:11:12.404743 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-h9hfp" podStartSLOduration=2.919943125 podStartE2EDuration="6.404726336s" podCreationTimestamp="2026-02-16 12:11:06 +0000 UTC" firstStartedPulling="2026-02-16 12:11:08.313094378 +0000 UTC m=+3857.942428543" lastFinishedPulling="2026-02-16 12:11:11.797877569 +0000 UTC m=+3861.427211754" observedRunningTime="2026-02-16 12:11:12.402856842 +0000 UTC m=+3862.032191047" watchObservedRunningTime="2026-02-16 12:11:12.404726336 +0000 UTC m=+3862.034060501" Feb 16 12:11:17 crc kubenswrapper[4949]: I0216 12:11:17.247105 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-h9hfp" Feb 16 12:11:17 crc kubenswrapper[4949]: I0216 12:11:17.247573 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-h9hfp" Feb 16 12:11:17 crc kubenswrapper[4949]: I0216 12:11:17.315249 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-h9hfp" Feb 16 12:11:17 crc kubenswrapper[4949]: I0216 12:11:17.479649 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-h9hfp" Feb 16 12:11:17 crc kubenswrapper[4949]: I0216 12:11:17.557702 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-h9hfp"] Feb 16 12:11:19 crc kubenswrapper[4949]: I0216 12:11:19.452590 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-h9hfp" podUID="b966c642-ee42-40af-8341-847b728ccb3d" containerName="registry-server" containerID="cri-o://f1c41c90161d6e75eafb7f44f33699f95fc7e94bda9fa9e58cb76f2d9c101719" gracePeriod=2 Feb 16 12:11:20 crc kubenswrapper[4949]: I0216 12:11:20.026783 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-h9hfp" Feb 16 12:11:20 crc kubenswrapper[4949]: I0216 12:11:20.108892 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b966c642-ee42-40af-8341-847b728ccb3d-utilities\") pod \"b966c642-ee42-40af-8341-847b728ccb3d\" (UID: \"b966c642-ee42-40af-8341-847b728ccb3d\") " Feb 16 12:11:20 crc kubenswrapper[4949]: I0216 12:11:20.110421 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b966c642-ee42-40af-8341-847b728ccb3d-catalog-content\") pod \"b966c642-ee42-40af-8341-847b728ccb3d\" (UID: \"b966c642-ee42-40af-8341-847b728ccb3d\") " Feb 16 12:11:20 crc kubenswrapper[4949]: I0216 12:11:20.110499 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7n5qb\" (UniqueName: \"kubernetes.io/projected/b966c642-ee42-40af-8341-847b728ccb3d-kube-api-access-7n5qb\") pod \"b966c642-ee42-40af-8341-847b728ccb3d\" (UID: \"b966c642-ee42-40af-8341-847b728ccb3d\") " Feb 16 12:11:20 crc kubenswrapper[4949]: I0216 12:11:20.110307 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b966c642-ee42-40af-8341-847b728ccb3d-utilities" (OuterVolumeSpecName: "utilities") pod "b966c642-ee42-40af-8341-847b728ccb3d" (UID: "b966c642-ee42-40af-8341-847b728ccb3d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 12:11:20 crc kubenswrapper[4949]: I0216 12:11:20.113367 4949 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b966c642-ee42-40af-8341-847b728ccb3d-utilities\") on node \"crc\" DevicePath \"\"" Feb 16 12:11:20 crc kubenswrapper[4949]: I0216 12:11:20.116472 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b966c642-ee42-40af-8341-847b728ccb3d-kube-api-access-7n5qb" (OuterVolumeSpecName: "kube-api-access-7n5qb") pod "b966c642-ee42-40af-8341-847b728ccb3d" (UID: "b966c642-ee42-40af-8341-847b728ccb3d"). InnerVolumeSpecName "kube-api-access-7n5qb". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 12:11:20 crc kubenswrapper[4949]: I0216 12:11:20.160605 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b966c642-ee42-40af-8341-847b728ccb3d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b966c642-ee42-40af-8341-847b728ccb3d" (UID: "b966c642-ee42-40af-8341-847b728ccb3d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 12:11:20 crc kubenswrapper[4949]: I0216 12:11:20.215241 4949 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b966c642-ee42-40af-8341-847b728ccb3d-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 16 12:11:20 crc kubenswrapper[4949]: I0216 12:11:20.215270 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7n5qb\" (UniqueName: \"kubernetes.io/projected/b966c642-ee42-40af-8341-847b728ccb3d-kube-api-access-7n5qb\") on node \"crc\" DevicePath \"\"" Feb 16 12:11:20 crc kubenswrapper[4949]: E0216 12:11:20.237586 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:11:20 crc kubenswrapper[4949]: I0216 12:11:20.466920 4949 generic.go:334] "Generic (PLEG): container finished" podID="b966c642-ee42-40af-8341-847b728ccb3d" containerID="f1c41c90161d6e75eafb7f44f33699f95fc7e94bda9fa9e58cb76f2d9c101719" exitCode=0 Feb 16 12:11:20 crc kubenswrapper[4949]: I0216 12:11:20.466965 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-h9hfp" event={"ID":"b966c642-ee42-40af-8341-847b728ccb3d","Type":"ContainerDied","Data":"f1c41c90161d6e75eafb7f44f33699f95fc7e94bda9fa9e58cb76f2d9c101719"} Feb 16 12:11:20 crc kubenswrapper[4949]: I0216 12:11:20.466992 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-h9hfp" event={"ID":"b966c642-ee42-40af-8341-847b728ccb3d","Type":"ContainerDied","Data":"7b16b8968cd2fc899b8263c52f3335dfe2de175a22fe95dbc1607854151d27a1"} Feb 16 12:11:20 crc kubenswrapper[4949]: I0216 12:11:20.467009 4949 scope.go:117] "RemoveContainer" containerID="f1c41c90161d6e75eafb7f44f33699f95fc7e94bda9fa9e58cb76f2d9c101719" Feb 16 12:11:20 crc kubenswrapper[4949]: I0216 12:11:20.467007 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-h9hfp" Feb 16 12:11:20 crc kubenswrapper[4949]: I0216 12:11:20.489402 4949 scope.go:117] "RemoveContainer" containerID="ff565823562c3ce74a8f06c54f4623873492cd39e6b2f379f616309ae1f7739f" Feb 16 12:11:20 crc kubenswrapper[4949]: I0216 12:11:20.510320 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-h9hfp"] Feb 16 12:11:20 crc kubenswrapper[4949]: I0216 12:11:20.521553 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-h9hfp"] Feb 16 12:11:20 crc kubenswrapper[4949]: I0216 12:11:20.540433 4949 scope.go:117] "RemoveContainer" containerID="9e51a62f46ee5a8b8cc36143733624fd0d9401ebdcdeeb35206eb465978fcef4" Feb 16 12:11:20 crc kubenswrapper[4949]: I0216 12:11:20.579121 4949 scope.go:117] "RemoveContainer" containerID="f1c41c90161d6e75eafb7f44f33699f95fc7e94bda9fa9e58cb76f2d9c101719" Feb 16 12:11:20 crc kubenswrapper[4949]: E0216 12:11:20.579578 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f1c41c90161d6e75eafb7f44f33699f95fc7e94bda9fa9e58cb76f2d9c101719\": container with ID starting with f1c41c90161d6e75eafb7f44f33699f95fc7e94bda9fa9e58cb76f2d9c101719 not found: ID does not exist" containerID="f1c41c90161d6e75eafb7f44f33699f95fc7e94bda9fa9e58cb76f2d9c101719" Feb 16 12:11:20 crc kubenswrapper[4949]: I0216 12:11:20.579631 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f1c41c90161d6e75eafb7f44f33699f95fc7e94bda9fa9e58cb76f2d9c101719"} err="failed to get container status \"f1c41c90161d6e75eafb7f44f33699f95fc7e94bda9fa9e58cb76f2d9c101719\": rpc error: code = NotFound desc = could not find container \"f1c41c90161d6e75eafb7f44f33699f95fc7e94bda9fa9e58cb76f2d9c101719\": container with ID starting with f1c41c90161d6e75eafb7f44f33699f95fc7e94bda9fa9e58cb76f2d9c101719 not found: ID does not exist" Feb 16 12:11:20 crc kubenswrapper[4949]: I0216 12:11:20.579656 4949 scope.go:117] "RemoveContainer" containerID="ff565823562c3ce74a8f06c54f4623873492cd39e6b2f379f616309ae1f7739f" Feb 16 12:11:20 crc kubenswrapper[4949]: E0216 12:11:20.579963 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ff565823562c3ce74a8f06c54f4623873492cd39e6b2f379f616309ae1f7739f\": container with ID starting with ff565823562c3ce74a8f06c54f4623873492cd39e6b2f379f616309ae1f7739f not found: ID does not exist" containerID="ff565823562c3ce74a8f06c54f4623873492cd39e6b2f379f616309ae1f7739f" Feb 16 12:11:20 crc kubenswrapper[4949]: I0216 12:11:20.580020 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff565823562c3ce74a8f06c54f4623873492cd39e6b2f379f616309ae1f7739f"} err="failed to get container status \"ff565823562c3ce74a8f06c54f4623873492cd39e6b2f379f616309ae1f7739f\": rpc error: code = NotFound desc = could not find container \"ff565823562c3ce74a8f06c54f4623873492cd39e6b2f379f616309ae1f7739f\": container with ID starting with ff565823562c3ce74a8f06c54f4623873492cd39e6b2f379f616309ae1f7739f not found: ID does not exist" Feb 16 12:11:20 crc kubenswrapper[4949]: I0216 12:11:20.580058 4949 scope.go:117] "RemoveContainer" containerID="9e51a62f46ee5a8b8cc36143733624fd0d9401ebdcdeeb35206eb465978fcef4" Feb 16 12:11:20 crc kubenswrapper[4949]: E0216 12:11:20.580449 4949 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"9e51a62f46ee5a8b8cc36143733624fd0d9401ebdcdeeb35206eb465978fcef4\": container with ID starting with 9e51a62f46ee5a8b8cc36143733624fd0d9401ebdcdeeb35206eb465978fcef4 not found: ID does not exist" containerID="9e51a62f46ee5a8b8cc36143733624fd0d9401ebdcdeeb35206eb465978fcef4" Feb 16 12:11:20 crc kubenswrapper[4949]: I0216 12:11:20.580535 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9e51a62f46ee5a8b8cc36143733624fd0d9401ebdcdeeb35206eb465978fcef4"} err="failed to get container status \"9e51a62f46ee5a8b8cc36143733624fd0d9401ebdcdeeb35206eb465978fcef4\": rpc error: code = NotFound desc = could not find container \"9e51a62f46ee5a8b8cc36143733624fd0d9401ebdcdeeb35206eb465978fcef4\": container with ID starting with 9e51a62f46ee5a8b8cc36143733624fd0d9401ebdcdeeb35206eb465978fcef4 not found: ID does not exist" Feb 16 12:11:21 crc kubenswrapper[4949]: I0216 12:11:21.250240 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b966c642-ee42-40af-8341-847b728ccb3d" path="/var/lib/kubelet/pods/b966c642-ee42-40af-8341-847b728ccb3d/volumes" Feb 16 12:11:22 crc kubenswrapper[4949]: I0216 12:11:22.236100 4949 scope.go:117] "RemoveContainer" containerID="c94c7ff085a1acabde918de5332cbd7f8a47e539952a8c199c48fa312cdccc2f" Feb 16 12:11:22 crc kubenswrapper[4949]: E0216 12:11:22.237090 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:11:23 crc kubenswrapper[4949]: E0216 12:11:23.237989 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:11:34 crc kubenswrapper[4949]: E0216 12:11:34.237264 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:11:35 crc kubenswrapper[4949]: E0216 12:11:35.239571 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:11:36 crc kubenswrapper[4949]: I0216 12:11:36.235606 4949 scope.go:117] "RemoveContainer" containerID="c94c7ff085a1acabde918de5332cbd7f8a47e539952a8c199c48fa312cdccc2f" Feb 16 12:11:36 crc kubenswrapper[4949]: E0216 12:11:36.236381 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:11:45 crc kubenswrapper[4949]: I0216 12:11:45.238738 4949 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 16 12:11:45 crc kubenswrapper[4949]: E0216 12:11:45.393574 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 16 12:11:45 crc kubenswrapper[4949]: E0216 12:11:45.393651 4949 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 16 12:11:45 crc kubenswrapper[4949]: E0216 12:11:45.393824 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ksbml,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL 
MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-5lgds_openstack(a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 16 12:11:45 crc kubenswrapper[4949]: E0216 12:11:45.395117 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:11:47 crc kubenswrapper[4949]: E0216 12:11:47.362595 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 16 12:11:47 crc kubenswrapper[4949]: E0216 12:11:47.362955 4949 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 16 12:11:47 crc kubenswrapper[4949]: E0216 12:11:47.363850 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n674h5dh7bh65bhcch65chc4h547h5d4h5c7h5dch5c8h74hb9h5f4hd8h79h7h59bh559h56bh9bhbch67bh68bh575h5cbh658h5bch7bhcch5d9q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8k7p7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(c69d7379-6f2b-45ae-8972-71e223a337a8): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 16 12:11:47 crc kubenswrapper[4949]: E0216 12:11:47.365887 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:11:47 crc kubenswrapper[4949]: I0216 12:11:47.686113 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-kwngt"] Feb 16 12:11:47 crc kubenswrapper[4949]: E0216 12:11:47.686968 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b966c642-ee42-40af-8341-847b728ccb3d" containerName="extract-content" Feb 16 12:11:47 crc kubenswrapper[4949]: I0216 12:11:47.687005 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="b966c642-ee42-40af-8341-847b728ccb3d" containerName="extract-content" Feb 16 12:11:47 crc kubenswrapper[4949]: E0216 12:11:47.687055 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b966c642-ee42-40af-8341-847b728ccb3d" containerName="registry-server" Feb 16 12:11:47 crc kubenswrapper[4949]: I0216 12:11:47.687069 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="b966c642-ee42-40af-8341-847b728ccb3d" containerName="registry-server" Feb 16 12:11:47 crc kubenswrapper[4949]: E0216 12:11:47.687134 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b966c642-ee42-40af-8341-847b728ccb3d" containerName="extract-utilities" Feb 16 12:11:47 crc kubenswrapper[4949]: I0216 12:11:47.687149 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="b966c642-ee42-40af-8341-847b728ccb3d" containerName="extract-utilities" Feb 16 12:11:47 crc kubenswrapper[4949]: I0216 12:11:47.687692 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="b966c642-ee42-40af-8341-847b728ccb3d" containerName="registry-server" Feb 16 12:11:47 crc kubenswrapper[4949]: I0216 12:11:47.692313 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-kwngt" Feb 16 12:11:47 crc kubenswrapper[4949]: I0216 12:11:47.701008 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-kwngt"] Feb 16 12:11:47 crc kubenswrapper[4949]: I0216 12:11:47.849457 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/499efb29-78c5-4e94-aa8a-7b11a42d8b55-catalog-content\") pod \"redhat-operators-kwngt\" (UID: \"499efb29-78c5-4e94-aa8a-7b11a42d8b55\") " pod="openshift-marketplace/redhat-operators-kwngt" Feb 16 12:11:47 crc kubenswrapper[4949]: I0216 12:11:47.849637 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-69mf7\" (UniqueName: \"kubernetes.io/projected/499efb29-78c5-4e94-aa8a-7b11a42d8b55-kube-api-access-69mf7\") pod \"redhat-operators-kwngt\" (UID: \"499efb29-78c5-4e94-aa8a-7b11a42d8b55\") " pod="openshift-marketplace/redhat-operators-kwngt" Feb 16 12:11:47 crc kubenswrapper[4949]: I0216 12:11:47.849665 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/499efb29-78c5-4e94-aa8a-7b11a42d8b55-utilities\") pod \"redhat-operators-kwngt\" (UID: \"499efb29-78c5-4e94-aa8a-7b11a42d8b55\") " pod="openshift-marketplace/redhat-operators-kwngt" Feb 16 12:11:47 crc kubenswrapper[4949]: I0216 12:11:47.951511 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/499efb29-78c5-4e94-aa8a-7b11a42d8b55-catalog-content\") pod \"redhat-operators-kwngt\" (UID: \"499efb29-78c5-4e94-aa8a-7b11a42d8b55\") " pod="openshift-marketplace/redhat-operators-kwngt" Feb 16 12:11:47 crc kubenswrapper[4949]: I0216 12:11:47.951694 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-69mf7\" (UniqueName: \"kubernetes.io/projected/499efb29-78c5-4e94-aa8a-7b11a42d8b55-kube-api-access-69mf7\") pod \"redhat-operators-kwngt\" (UID: \"499efb29-78c5-4e94-aa8a-7b11a42d8b55\") " pod="openshift-marketplace/redhat-operators-kwngt" Feb 16 12:11:47 crc kubenswrapper[4949]: I0216 12:11:47.951729 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/499efb29-78c5-4e94-aa8a-7b11a42d8b55-utilities\") pod \"redhat-operators-kwngt\" (UID: \"499efb29-78c5-4e94-aa8a-7b11a42d8b55\") " pod="openshift-marketplace/redhat-operators-kwngt" Feb 16 12:11:47 crc kubenswrapper[4949]: I0216 12:11:47.951988 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/499efb29-78c5-4e94-aa8a-7b11a42d8b55-catalog-content\") pod \"redhat-operators-kwngt\" (UID: \"499efb29-78c5-4e94-aa8a-7b11a42d8b55\") " pod="openshift-marketplace/redhat-operators-kwngt" Feb 16 12:11:47 crc kubenswrapper[4949]: I0216 12:11:47.952263 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/499efb29-78c5-4e94-aa8a-7b11a42d8b55-utilities\") pod \"redhat-operators-kwngt\" (UID: \"499efb29-78c5-4e94-aa8a-7b11a42d8b55\") " pod="openshift-marketplace/redhat-operators-kwngt" Feb 16 12:11:47 crc kubenswrapper[4949]: I0216 12:11:47.972570 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-69mf7\" (UniqueName: \"kubernetes.io/projected/499efb29-78c5-4e94-aa8a-7b11a42d8b55-kube-api-access-69mf7\") pod \"redhat-operators-kwngt\" (UID: \"499efb29-78c5-4e94-aa8a-7b11a42d8b55\") " pod="openshift-marketplace/redhat-operators-kwngt" Feb 16 12:11:48 crc kubenswrapper[4949]: I0216 12:11:48.046663 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-kwngt" Feb 16 12:11:48 crc kubenswrapper[4949]: I0216 12:11:48.527888 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-kwngt"] Feb 16 12:11:48 crc kubenswrapper[4949]: I0216 12:11:48.790083 4949 generic.go:334] "Generic (PLEG): container finished" podID="499efb29-78c5-4e94-aa8a-7b11a42d8b55" containerID="07cb0f83b68d4e38191bc7f4f3c680acf1868b9b2d3940700d682ec226956478" exitCode=0 Feb 16 12:11:48 crc kubenswrapper[4949]: I0216 12:11:48.790122 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kwngt" event={"ID":"499efb29-78c5-4e94-aa8a-7b11a42d8b55","Type":"ContainerDied","Data":"07cb0f83b68d4e38191bc7f4f3c680acf1868b9b2d3940700d682ec226956478"} Feb 16 12:11:48 crc kubenswrapper[4949]: I0216 12:11:48.790146 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kwngt" event={"ID":"499efb29-78c5-4e94-aa8a-7b11a42d8b55","Type":"ContainerStarted","Data":"f1f0de541da3186ea01f1a0b6c5703f089f011d80f90c913da855337ebe8e92a"} Feb 16 12:11:50 crc kubenswrapper[4949]: I0216 12:11:50.810733 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kwngt" event={"ID":"499efb29-78c5-4e94-aa8a-7b11a42d8b55","Type":"ContainerStarted","Data":"2f941d56c87795f1855c9955a7e57c21991133470ce8490718058be5b070af73"} Feb 16 12:11:51 crc kubenswrapper[4949]: I0216 12:11:51.242510 4949 scope.go:117] "RemoveContainer" containerID="c94c7ff085a1acabde918de5332cbd7f8a47e539952a8c199c48fa312cdccc2f" Feb 16 12:11:51 crc kubenswrapper[4949]: E0216 12:11:51.243099 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:11:55 crc kubenswrapper[4949]: I0216 12:11:55.872509 4949 generic.go:334] "Generic (PLEG): container finished" podID="499efb29-78c5-4e94-aa8a-7b11a42d8b55" containerID="2f941d56c87795f1855c9955a7e57c21991133470ce8490718058be5b070af73" exitCode=0 Feb 16 12:11:55 crc kubenswrapper[4949]: I0216 12:11:55.872816 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kwngt" event={"ID":"499efb29-78c5-4e94-aa8a-7b11a42d8b55","Type":"ContainerDied","Data":"2f941d56c87795f1855c9955a7e57c21991133470ce8490718058be5b070af73"} Feb 16 12:11:56 crc kubenswrapper[4949]: E0216 12:11:56.237528 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:11:57 crc kubenswrapper[4949]: I0216 12:11:57.901910 4949 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kwngt" event={"ID":"499efb29-78c5-4e94-aa8a-7b11a42d8b55","Type":"ContainerStarted","Data":"78395b51fb5bd727b05c079948026c0139008e015ca3b426448dcfd93af5457e"} Feb 16 12:11:57 crc kubenswrapper[4949]: I0216 12:11:57.932873 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-kwngt" podStartSLOduration=3.390551748 podStartE2EDuration="10.932814392s" podCreationTimestamp="2026-02-16 12:11:47 +0000 UTC" firstStartedPulling="2026-02-16 12:11:48.793046619 +0000 UTC m=+3898.422380784" lastFinishedPulling="2026-02-16 12:11:56.335309223 +0000 UTC m=+3905.964643428" observedRunningTime="2026-02-16 12:11:57.919337028 +0000 UTC m=+3907.548671233" watchObservedRunningTime="2026-02-16 12:11:57.932814392 +0000 UTC m=+3907.562148567" Feb 16 12:11:58 crc kubenswrapper[4949]: I0216 12:11:58.047471 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-kwngt" Feb 16 12:11:58 crc kubenswrapper[4949]: I0216 12:11:58.047531 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-kwngt" Feb 16 12:11:58 crc kubenswrapper[4949]: E0216 12:11:58.238849 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:11:59 crc kubenswrapper[4949]: I0216 12:11:59.114696 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-kwngt" podUID="499efb29-78c5-4e94-aa8a-7b11a42d8b55" containerName="registry-server" probeResult="failure" output=< Feb 16 12:11:59 crc kubenswrapper[4949]: timeout: failed to connect service ":50051" within 1s Feb 16 12:11:59 crc kubenswrapper[4949]: > Feb 16 12:12:04 crc kubenswrapper[4949]: I0216 12:12:04.235221 4949 scope.go:117] "RemoveContainer" containerID="c94c7ff085a1acabde918de5332cbd7f8a47e539952a8c199c48fa312cdccc2f" Feb 16 12:12:04 crc kubenswrapper[4949]: E0216 12:12:04.235904 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:12:08 crc kubenswrapper[4949]: E0216 12:12:08.237456 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:12:09 crc kubenswrapper[4949]: I0216 12:12:09.124119 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-kwngt" podUID="499efb29-78c5-4e94-aa8a-7b11a42d8b55" containerName="registry-server" probeResult="failure" output=< Feb 16 12:12:09 crc kubenswrapper[4949]: timeout: failed to connect service ":50051" within 1s Feb 16 12:12:09 crc 
kubenswrapper[4949]: > Feb 16 12:12:13 crc kubenswrapper[4949]: E0216 12:12:13.238154 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:12:17 crc kubenswrapper[4949]: I0216 12:12:17.235466 4949 scope.go:117] "RemoveContainer" containerID="c94c7ff085a1acabde918de5332cbd7f8a47e539952a8c199c48fa312cdccc2f" Feb 16 12:12:17 crc kubenswrapper[4949]: E0216 12:12:17.236439 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:12:19 crc kubenswrapper[4949]: I0216 12:12:19.128454 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-kwngt" podUID="499efb29-78c5-4e94-aa8a-7b11a42d8b55" containerName="registry-server" probeResult="failure" output=< Feb 16 12:12:19 crc kubenswrapper[4949]: timeout: failed to connect service ":50051" within 1s Feb 16 12:12:19 crc kubenswrapper[4949]: > Feb 16 12:12:22 crc kubenswrapper[4949]: E0216 12:12:22.237381 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:12:24 crc kubenswrapper[4949]: E0216 12:12:24.240520 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:12:28 crc kubenswrapper[4949]: I0216 12:12:28.113923 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-kwngt" Feb 16 12:12:28 crc kubenswrapper[4949]: I0216 12:12:28.172048 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-kwngt" Feb 16 12:12:28 crc kubenswrapper[4949]: I0216 12:12:28.236037 4949 scope.go:117] "RemoveContainer" containerID="c94c7ff085a1acabde918de5332cbd7f8a47e539952a8c199c48fa312cdccc2f" Feb 16 12:12:28 crc kubenswrapper[4949]: E0216 12:12:28.236353 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:12:28 crc kubenswrapper[4949]: I0216 12:12:28.634094 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-kwngt"] Feb 16 
12:12:29 crc kubenswrapper[4949]: I0216 12:12:29.322851 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-kwngt" podUID="499efb29-78c5-4e94-aa8a-7b11a42d8b55" containerName="registry-server" containerID="cri-o://78395b51fb5bd727b05c079948026c0139008e015ca3b426448dcfd93af5457e" gracePeriod=2 Feb 16 12:12:30 crc kubenswrapper[4949]: I0216 12:12:30.354942 4949 generic.go:334] "Generic (PLEG): container finished" podID="499efb29-78c5-4e94-aa8a-7b11a42d8b55" containerID="78395b51fb5bd727b05c079948026c0139008e015ca3b426448dcfd93af5457e" exitCode=0 Feb 16 12:12:30 crc kubenswrapper[4949]: I0216 12:12:30.354995 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kwngt" event={"ID":"499efb29-78c5-4e94-aa8a-7b11a42d8b55","Type":"ContainerDied","Data":"78395b51fb5bd727b05c079948026c0139008e015ca3b426448dcfd93af5457e"} Feb 16 12:12:30 crc kubenswrapper[4949]: I0216 12:12:30.639237 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-kwngt" Feb 16 12:12:30 crc kubenswrapper[4949]: I0216 12:12:30.772233 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-69mf7\" (UniqueName: \"kubernetes.io/projected/499efb29-78c5-4e94-aa8a-7b11a42d8b55-kube-api-access-69mf7\") pod \"499efb29-78c5-4e94-aa8a-7b11a42d8b55\" (UID: \"499efb29-78c5-4e94-aa8a-7b11a42d8b55\") " Feb 16 12:12:30 crc kubenswrapper[4949]: I0216 12:12:30.772422 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/499efb29-78c5-4e94-aa8a-7b11a42d8b55-utilities\") pod \"499efb29-78c5-4e94-aa8a-7b11a42d8b55\" (UID: \"499efb29-78c5-4e94-aa8a-7b11a42d8b55\") " Feb 16 12:12:30 crc kubenswrapper[4949]: I0216 12:12:30.772479 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/499efb29-78c5-4e94-aa8a-7b11a42d8b55-catalog-content\") pod \"499efb29-78c5-4e94-aa8a-7b11a42d8b55\" (UID: \"499efb29-78c5-4e94-aa8a-7b11a42d8b55\") " Feb 16 12:12:30 crc kubenswrapper[4949]: I0216 12:12:30.773500 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/499efb29-78c5-4e94-aa8a-7b11a42d8b55-utilities" (OuterVolumeSpecName: "utilities") pod "499efb29-78c5-4e94-aa8a-7b11a42d8b55" (UID: "499efb29-78c5-4e94-aa8a-7b11a42d8b55"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 12:12:30 crc kubenswrapper[4949]: I0216 12:12:30.777393 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/499efb29-78c5-4e94-aa8a-7b11a42d8b55-kube-api-access-69mf7" (OuterVolumeSpecName: "kube-api-access-69mf7") pod "499efb29-78c5-4e94-aa8a-7b11a42d8b55" (UID: "499efb29-78c5-4e94-aa8a-7b11a42d8b55"). InnerVolumeSpecName "kube-api-access-69mf7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 12:12:30 crc kubenswrapper[4949]: I0216 12:12:30.875774 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-69mf7\" (UniqueName: \"kubernetes.io/projected/499efb29-78c5-4e94-aa8a-7b11a42d8b55-kube-api-access-69mf7\") on node \"crc\" DevicePath \"\"" Feb 16 12:12:30 crc kubenswrapper[4949]: I0216 12:12:30.875807 4949 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/499efb29-78c5-4e94-aa8a-7b11a42d8b55-utilities\") on node \"crc\" DevicePath \"\"" Feb 16 12:12:30 crc kubenswrapper[4949]: I0216 12:12:30.892857 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/499efb29-78c5-4e94-aa8a-7b11a42d8b55-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "499efb29-78c5-4e94-aa8a-7b11a42d8b55" (UID: "499efb29-78c5-4e94-aa8a-7b11a42d8b55"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 12:12:30 crc kubenswrapper[4949]: I0216 12:12:30.978443 4949 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/499efb29-78c5-4e94-aa8a-7b11a42d8b55-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 16 12:12:31 crc kubenswrapper[4949]: I0216 12:12:31.367851 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kwngt" event={"ID":"499efb29-78c5-4e94-aa8a-7b11a42d8b55","Type":"ContainerDied","Data":"f1f0de541da3186ea01f1a0b6c5703f089f011d80f90c913da855337ebe8e92a"} Feb 16 12:12:31 crc kubenswrapper[4949]: I0216 12:12:31.367907 4949 scope.go:117] "RemoveContainer" containerID="78395b51fb5bd727b05c079948026c0139008e015ca3b426448dcfd93af5457e" Feb 16 12:12:31 crc kubenswrapper[4949]: I0216 12:12:31.367926 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-kwngt" Feb 16 12:12:31 crc kubenswrapper[4949]: I0216 12:12:31.398632 4949 scope.go:117] "RemoveContainer" containerID="2f941d56c87795f1855c9955a7e57c21991133470ce8490718058be5b070af73" Feb 16 12:12:31 crc kubenswrapper[4949]: I0216 12:12:31.404338 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-kwngt"] Feb 16 12:12:31 crc kubenswrapper[4949]: I0216 12:12:31.418294 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-kwngt"] Feb 16 12:12:31 crc kubenswrapper[4949]: I0216 12:12:31.438343 4949 scope.go:117] "RemoveContainer" containerID="07cb0f83b68d4e38191bc7f4f3c680acf1868b9b2d3940700d682ec226956478" Feb 16 12:12:33 crc kubenswrapper[4949]: I0216 12:12:33.253836 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="499efb29-78c5-4e94-aa8a-7b11a42d8b55" path="/var/lib/kubelet/pods/499efb29-78c5-4e94-aa8a-7b11a42d8b55/volumes" Feb 16 12:12:34 crc kubenswrapper[4949]: E0216 12:12:34.238402 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:12:36 crc kubenswrapper[4949]: E0216 12:12:36.237888 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:12:42 crc kubenswrapper[4949]: I0216 12:12:42.235914 4949 scope.go:117] "RemoveContainer" containerID="c94c7ff085a1acabde918de5332cbd7f8a47e539952a8c199c48fa312cdccc2f" Feb 16 12:12:42 crc kubenswrapper[4949]: E0216 12:12:42.236733 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:12:47 crc kubenswrapper[4949]: E0216 12:12:47.238330 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:12:48 crc kubenswrapper[4949]: E0216 12:12:48.237150 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:12:54 crc kubenswrapper[4949]: I0216 12:12:54.235158 4949 scope.go:117] "RemoveContainer" containerID="c94c7ff085a1acabde918de5332cbd7f8a47e539952a8c199c48fa312cdccc2f" Feb 16 12:12:54 crc kubenswrapper[4949]: E0216 12:12:54.236049 4949 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:12:59 crc kubenswrapper[4949]: E0216 12:12:59.245519 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:13:00 crc kubenswrapper[4949]: E0216 12:13:00.237654 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:13:08 crc kubenswrapper[4949]: I0216 12:13:08.235586 4949 scope.go:117] "RemoveContainer" containerID="c94c7ff085a1acabde918de5332cbd7f8a47e539952a8c199c48fa312cdccc2f" Feb 16 12:13:08 crc kubenswrapper[4949]: E0216 12:13:08.236369 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:13:12 crc kubenswrapper[4949]: I0216 12:13:12.905276 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-pz59m"] Feb 16 12:13:12 crc kubenswrapper[4949]: E0216 12:13:12.906679 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="499efb29-78c5-4e94-aa8a-7b11a42d8b55" containerName="extract-utilities" Feb 16 12:13:12 crc kubenswrapper[4949]: I0216 12:13:12.906705 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="499efb29-78c5-4e94-aa8a-7b11a42d8b55" containerName="extract-utilities" Feb 16 12:13:12 crc kubenswrapper[4949]: E0216 12:13:12.906736 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="499efb29-78c5-4e94-aa8a-7b11a42d8b55" containerName="registry-server" Feb 16 12:13:12 crc kubenswrapper[4949]: I0216 12:13:12.906748 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="499efb29-78c5-4e94-aa8a-7b11a42d8b55" containerName="registry-server" Feb 16 12:13:12 crc kubenswrapper[4949]: E0216 12:13:12.906775 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="499efb29-78c5-4e94-aa8a-7b11a42d8b55" containerName="extract-content" Feb 16 12:13:12 crc kubenswrapper[4949]: I0216 12:13:12.906788 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="499efb29-78c5-4e94-aa8a-7b11a42d8b55" containerName="extract-content" Feb 16 12:13:12 crc kubenswrapper[4949]: I0216 12:13:12.907231 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="499efb29-78c5-4e94-aa8a-7b11a42d8b55" containerName="registry-server" Feb 16 12:13:12 crc kubenswrapper[4949]: I0216 12:13:12.909802 4949 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pz59m" Feb 16 12:13:12 crc kubenswrapper[4949]: I0216 12:13:12.918677 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pz59m"] Feb 16 12:13:13 crc kubenswrapper[4949]: I0216 12:13:13.010787 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6de6074f-bf93-4ca6-8355-a5de759e7169-utilities\") pod \"community-operators-pz59m\" (UID: \"6de6074f-bf93-4ca6-8355-a5de759e7169\") " pod="openshift-marketplace/community-operators-pz59m" Feb 16 12:13:13 crc kubenswrapper[4949]: I0216 12:13:13.011180 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wxphl\" (UniqueName: \"kubernetes.io/projected/6de6074f-bf93-4ca6-8355-a5de759e7169-kube-api-access-wxphl\") pod \"community-operators-pz59m\" (UID: \"6de6074f-bf93-4ca6-8355-a5de759e7169\") " pod="openshift-marketplace/community-operators-pz59m" Feb 16 12:13:13 crc kubenswrapper[4949]: I0216 12:13:13.011244 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6de6074f-bf93-4ca6-8355-a5de759e7169-catalog-content\") pod \"community-operators-pz59m\" (UID: \"6de6074f-bf93-4ca6-8355-a5de759e7169\") " pod="openshift-marketplace/community-operators-pz59m" Feb 16 12:13:13 crc kubenswrapper[4949]: I0216 12:13:13.113394 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6de6074f-bf93-4ca6-8355-a5de759e7169-utilities\") pod \"community-operators-pz59m\" (UID: \"6de6074f-bf93-4ca6-8355-a5de759e7169\") " pod="openshift-marketplace/community-operators-pz59m" Feb 16 12:13:13 crc kubenswrapper[4949]: I0216 12:13:13.113579 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wxphl\" (UniqueName: \"kubernetes.io/projected/6de6074f-bf93-4ca6-8355-a5de759e7169-kube-api-access-wxphl\") pod \"community-operators-pz59m\" (UID: \"6de6074f-bf93-4ca6-8355-a5de759e7169\") " pod="openshift-marketplace/community-operators-pz59m" Feb 16 12:13:13 crc kubenswrapper[4949]: I0216 12:13:13.113649 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6de6074f-bf93-4ca6-8355-a5de759e7169-catalog-content\") pod \"community-operators-pz59m\" (UID: \"6de6074f-bf93-4ca6-8355-a5de759e7169\") " pod="openshift-marketplace/community-operators-pz59m" Feb 16 12:13:13 crc kubenswrapper[4949]: I0216 12:13:13.113993 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6de6074f-bf93-4ca6-8355-a5de759e7169-utilities\") pod \"community-operators-pz59m\" (UID: \"6de6074f-bf93-4ca6-8355-a5de759e7169\") " pod="openshift-marketplace/community-operators-pz59m" Feb 16 12:13:13 crc kubenswrapper[4949]: I0216 12:13:13.114072 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6de6074f-bf93-4ca6-8355-a5de759e7169-catalog-content\") pod \"community-operators-pz59m\" (UID: \"6de6074f-bf93-4ca6-8355-a5de759e7169\") " pod="openshift-marketplace/community-operators-pz59m" Feb 16 12:13:13 crc kubenswrapper[4949]: E0216 12:13:13.238559 4949 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:13:13 crc kubenswrapper[4949]: I0216 12:13:13.625273 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wxphl\" (UniqueName: \"kubernetes.io/projected/6de6074f-bf93-4ca6-8355-a5de759e7169-kube-api-access-wxphl\") pod \"community-operators-pz59m\" (UID: \"6de6074f-bf93-4ca6-8355-a5de759e7169\") " pod="openshift-marketplace/community-operators-pz59m" Feb 16 12:13:13 crc kubenswrapper[4949]: I0216 12:13:13.853188 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pz59m" Feb 16 12:13:14 crc kubenswrapper[4949]: E0216 12:13:14.268409 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:13:14 crc kubenswrapper[4949]: I0216 12:13:14.402449 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pz59m"] Feb 16 12:13:14 crc kubenswrapper[4949]: I0216 12:13:14.907032 4949 generic.go:334] "Generic (PLEG): container finished" podID="6de6074f-bf93-4ca6-8355-a5de759e7169" containerID="93949ce75e295c60fb57f71ff72e4810842954beebdf6698520a72bf3671d19c" exitCode=0 Feb 16 12:13:14 crc kubenswrapper[4949]: I0216 12:13:14.907124 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pz59m" event={"ID":"6de6074f-bf93-4ca6-8355-a5de759e7169","Type":"ContainerDied","Data":"93949ce75e295c60fb57f71ff72e4810842954beebdf6698520a72bf3671d19c"} Feb 16 12:13:14 crc kubenswrapper[4949]: I0216 12:13:14.907411 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pz59m" event={"ID":"6de6074f-bf93-4ca6-8355-a5de759e7169","Type":"ContainerStarted","Data":"1f5544b393c33f4bdcc796c06f1116d5b2300cdfded8fe6ebe0efa553fe7fe92"} Feb 16 12:13:16 crc kubenswrapper[4949]: I0216 12:13:16.931895 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pz59m" event={"ID":"6de6074f-bf93-4ca6-8355-a5de759e7169","Type":"ContainerStarted","Data":"110fdb62abafd7e94a076b841f81ed50614227e2641ab2f348d49af572f3c27e"} Feb 16 12:13:17 crc kubenswrapper[4949]: I0216 12:13:17.944559 4949 generic.go:334] "Generic (PLEG): container finished" podID="6de6074f-bf93-4ca6-8355-a5de759e7169" containerID="110fdb62abafd7e94a076b841f81ed50614227e2641ab2f348d49af572f3c27e" exitCode=0 Feb 16 12:13:17 crc kubenswrapper[4949]: I0216 12:13:17.944637 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pz59m" event={"ID":"6de6074f-bf93-4ca6-8355-a5de759e7169","Type":"ContainerDied","Data":"110fdb62abafd7e94a076b841f81ed50614227e2641ab2f348d49af572f3c27e"} Feb 16 12:13:18 crc kubenswrapper[4949]: I0216 12:13:18.957329 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pz59m" 
event={"ID":"6de6074f-bf93-4ca6-8355-a5de759e7169","Type":"ContainerStarted","Data":"48e32c0735c51c6e5d78f3d790d4a95d2f1e923348d3b59d52bd02a9a1aae8e2"} Feb 16 12:13:19 crc kubenswrapper[4949]: I0216 12:13:19.044812 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-pz59m" podStartSLOduration=3.617078876 podStartE2EDuration="7.044794089s" podCreationTimestamp="2026-02-16 12:13:12 +0000 UTC" firstStartedPulling="2026-02-16 12:13:14.91032943 +0000 UTC m=+3984.539663595" lastFinishedPulling="2026-02-16 12:13:18.338044633 +0000 UTC m=+3987.967378808" observedRunningTime="2026-02-16 12:13:19.016097881 +0000 UTC m=+3988.645432056" watchObservedRunningTime="2026-02-16 12:13:19.044794089 +0000 UTC m=+3988.674128254" Feb 16 12:13:20 crc kubenswrapper[4949]: I0216 12:13:20.235411 4949 scope.go:117] "RemoveContainer" containerID="c94c7ff085a1acabde918de5332cbd7f8a47e539952a8c199c48fa312cdccc2f" Feb 16 12:13:20 crc kubenswrapper[4949]: E0216 12:13:20.236089 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:13:23 crc kubenswrapper[4949]: I0216 12:13:23.855042 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-pz59m" Feb 16 12:13:23 crc kubenswrapper[4949]: I0216 12:13:23.855572 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-pz59m" Feb 16 12:13:24 crc kubenswrapper[4949]: I0216 12:13:24.039048 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-pz59m" Feb 16 12:13:24 crc kubenswrapper[4949]: I0216 12:13:24.098338 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-pz59m" Feb 16 12:13:24 crc kubenswrapper[4949]: E0216 12:13:24.237689 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:13:24 crc kubenswrapper[4949]: I0216 12:13:24.279734 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-pz59m"] Feb 16 12:13:26 crc kubenswrapper[4949]: I0216 12:13:26.062678 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-pz59m" podUID="6de6074f-bf93-4ca6-8355-a5de759e7169" containerName="registry-server" containerID="cri-o://48e32c0735c51c6e5d78f3d790d4a95d2f1e923348d3b59d52bd02a9a1aae8e2" gracePeriod=2 Feb 16 12:13:26 crc kubenswrapper[4949]: E0216 12:13:26.236415 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" 
podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:13:27 crc kubenswrapper[4949]: I0216 12:13:27.073706 4949 generic.go:334] "Generic (PLEG): container finished" podID="6de6074f-bf93-4ca6-8355-a5de759e7169" containerID="48e32c0735c51c6e5d78f3d790d4a95d2f1e923348d3b59d52bd02a9a1aae8e2" exitCode=0 Feb 16 12:13:27 crc kubenswrapper[4949]: I0216 12:13:27.073741 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pz59m" event={"ID":"6de6074f-bf93-4ca6-8355-a5de759e7169","Type":"ContainerDied","Data":"48e32c0735c51c6e5d78f3d790d4a95d2f1e923348d3b59d52bd02a9a1aae8e2"} Feb 16 12:13:27 crc kubenswrapper[4949]: I0216 12:13:27.649834 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pz59m" Feb 16 12:13:27 crc kubenswrapper[4949]: I0216 12:13:27.814315 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6de6074f-bf93-4ca6-8355-a5de759e7169-utilities\") pod \"6de6074f-bf93-4ca6-8355-a5de759e7169\" (UID: \"6de6074f-bf93-4ca6-8355-a5de759e7169\") " Feb 16 12:13:27 crc kubenswrapper[4949]: I0216 12:13:27.814866 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxphl\" (UniqueName: \"kubernetes.io/projected/6de6074f-bf93-4ca6-8355-a5de759e7169-kube-api-access-wxphl\") pod \"6de6074f-bf93-4ca6-8355-a5de759e7169\" (UID: \"6de6074f-bf93-4ca6-8355-a5de759e7169\") " Feb 16 12:13:27 crc kubenswrapper[4949]: I0216 12:13:27.814895 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6de6074f-bf93-4ca6-8355-a5de759e7169-catalog-content\") pod \"6de6074f-bf93-4ca6-8355-a5de759e7169\" (UID: \"6de6074f-bf93-4ca6-8355-a5de759e7169\") " Feb 16 12:13:27 crc kubenswrapper[4949]: I0216 12:13:27.818047 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6de6074f-bf93-4ca6-8355-a5de759e7169-utilities" (OuterVolumeSpecName: "utilities") pod "6de6074f-bf93-4ca6-8355-a5de759e7169" (UID: "6de6074f-bf93-4ca6-8355-a5de759e7169"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 12:13:27 crc kubenswrapper[4949]: I0216 12:13:27.830440 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6de6074f-bf93-4ca6-8355-a5de759e7169-kube-api-access-wxphl" (OuterVolumeSpecName: "kube-api-access-wxphl") pod "6de6074f-bf93-4ca6-8355-a5de759e7169" (UID: "6de6074f-bf93-4ca6-8355-a5de759e7169"). InnerVolumeSpecName "kube-api-access-wxphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 12:13:27 crc kubenswrapper[4949]: I0216 12:13:27.871311 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6de6074f-bf93-4ca6-8355-a5de759e7169-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6de6074f-bf93-4ca6-8355-a5de759e7169" (UID: "6de6074f-bf93-4ca6-8355-a5de759e7169"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 12:13:27 crc kubenswrapper[4949]: I0216 12:13:27.918360 4949 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6de6074f-bf93-4ca6-8355-a5de759e7169-utilities\") on node \"crc\" DevicePath \"\"" Feb 16 12:13:27 crc kubenswrapper[4949]: I0216 12:13:27.918394 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxphl\" (UniqueName: \"kubernetes.io/projected/6de6074f-bf93-4ca6-8355-a5de759e7169-kube-api-access-wxphl\") on node \"crc\" DevicePath \"\"" Feb 16 12:13:27 crc kubenswrapper[4949]: I0216 12:13:27.918407 4949 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6de6074f-bf93-4ca6-8355-a5de759e7169-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 16 12:13:28 crc kubenswrapper[4949]: I0216 12:13:28.087587 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pz59m" event={"ID":"6de6074f-bf93-4ca6-8355-a5de759e7169","Type":"ContainerDied","Data":"1f5544b393c33f4bdcc796c06f1116d5b2300cdfded8fe6ebe0efa553fe7fe92"} Feb 16 12:13:28 crc kubenswrapper[4949]: I0216 12:13:28.087653 4949 scope.go:117] "RemoveContainer" containerID="48e32c0735c51c6e5d78f3d790d4a95d2f1e923348d3b59d52bd02a9a1aae8e2" Feb 16 12:13:28 crc kubenswrapper[4949]: I0216 12:13:28.087696 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pz59m" Feb 16 12:13:28 crc kubenswrapper[4949]: I0216 12:13:28.123656 4949 scope.go:117] "RemoveContainer" containerID="110fdb62abafd7e94a076b841f81ed50614227e2641ab2f348d49af572f3c27e" Feb 16 12:13:28 crc kubenswrapper[4949]: I0216 12:13:28.136096 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-pz59m"] Feb 16 12:13:28 crc kubenswrapper[4949]: I0216 12:13:28.147094 4949 scope.go:117] "RemoveContainer" containerID="93949ce75e295c60fb57f71ff72e4810842954beebdf6698520a72bf3671d19c" Feb 16 12:13:28 crc kubenswrapper[4949]: I0216 12:13:28.148359 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-pz59m"] Feb 16 12:13:29 crc kubenswrapper[4949]: I0216 12:13:29.253038 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6de6074f-bf93-4ca6-8355-a5de759e7169" path="/var/lib/kubelet/pods/6de6074f-bf93-4ca6-8355-a5de759e7169/volumes" Feb 16 12:13:32 crc kubenswrapper[4949]: I0216 12:13:32.235038 4949 scope.go:117] "RemoveContainer" containerID="c94c7ff085a1acabde918de5332cbd7f8a47e539952a8c199c48fa312cdccc2f" Feb 16 12:13:32 crc kubenswrapper[4949]: E0216 12:13:32.236141 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:13:36 crc kubenswrapper[4949]: E0216 12:13:36.238623 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" 
podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:13:40 crc kubenswrapper[4949]: E0216 12:13:40.241449 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:13:47 crc kubenswrapper[4949]: I0216 12:13:47.236002 4949 scope.go:117] "RemoveContainer" containerID="c94c7ff085a1acabde918de5332cbd7f8a47e539952a8c199c48fa312cdccc2f" Feb 16 12:13:48 crc kubenswrapper[4949]: I0216 12:13:48.333568 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" event={"ID":"39ca5ab7-457c-4404-a3eb-f6acce74843b","Type":"ContainerStarted","Data":"74581081df8341579e9c4de46399b9d90a92c0b8a2652976dc69519a6af60f6c"} Feb 16 12:13:49 crc kubenswrapper[4949]: E0216 12:13:49.241562 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:13:49 crc kubenswrapper[4949]: I0216 12:13:49.348552 4949 generic.go:334] "Generic (PLEG): container finished" podID="83b6a7c1-807f-4f19-b519-75879c54d0c5" containerID="7eed7649a4e13d55185cadb72e9ee4199198b2ea8e504851124068cd070319fc" exitCode=2 Feb 16 12:13:49 crc kubenswrapper[4949]: I0216 12:13:49.348605 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rgqdj" event={"ID":"83b6a7c1-807f-4f19-b519-75879c54d0c5","Type":"ContainerDied","Data":"7eed7649a4e13d55185cadb72e9ee4199198b2ea8e504851124068cd070319fc"} Feb 16 12:13:50 crc kubenswrapper[4949]: I0216 12:13:50.884723 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rgqdj" Feb 16 12:13:50 crc kubenswrapper[4949]: I0216 12:13:50.922992 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/83b6a7c1-807f-4f19-b519-75879c54d0c5-ssh-key-openstack-edpm-ipam\") pod \"83b6a7c1-807f-4f19-b519-75879c54d0c5\" (UID: \"83b6a7c1-807f-4f19-b519-75879c54d0c5\") " Feb 16 12:13:50 crc kubenswrapper[4949]: I0216 12:13:50.923073 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/83b6a7c1-807f-4f19-b519-75879c54d0c5-inventory\") pod \"83b6a7c1-807f-4f19-b519-75879c54d0c5\" (UID: \"83b6a7c1-807f-4f19-b519-75879c54d0c5\") " Feb 16 12:13:50 crc kubenswrapper[4949]: I0216 12:13:50.923254 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bfzj8\" (UniqueName: \"kubernetes.io/projected/83b6a7c1-807f-4f19-b519-75879c54d0c5-kube-api-access-bfzj8\") pod \"83b6a7c1-807f-4f19-b519-75879c54d0c5\" (UID: \"83b6a7c1-807f-4f19-b519-75879c54d0c5\") " Feb 16 12:13:50 crc kubenswrapper[4949]: I0216 12:13:50.928525 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/83b6a7c1-807f-4f19-b519-75879c54d0c5-kube-api-access-bfzj8" (OuterVolumeSpecName: "kube-api-access-bfzj8") pod "83b6a7c1-807f-4f19-b519-75879c54d0c5" (UID: "83b6a7c1-807f-4f19-b519-75879c54d0c5"). InnerVolumeSpecName "kube-api-access-bfzj8". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 12:13:50 crc kubenswrapper[4949]: I0216 12:13:50.959979 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83b6a7c1-807f-4f19-b519-75879c54d0c5-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "83b6a7c1-807f-4f19-b519-75879c54d0c5" (UID: "83b6a7c1-807f-4f19-b519-75879c54d0c5"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 12:13:50 crc kubenswrapper[4949]: I0216 12:13:50.960044 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83b6a7c1-807f-4f19-b519-75879c54d0c5-inventory" (OuterVolumeSpecName: "inventory") pod "83b6a7c1-807f-4f19-b519-75879c54d0c5" (UID: "83b6a7c1-807f-4f19-b519-75879c54d0c5"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 12:13:51 crc kubenswrapper[4949]: I0216 12:13:51.026882 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bfzj8\" (UniqueName: \"kubernetes.io/projected/83b6a7c1-807f-4f19-b519-75879c54d0c5-kube-api-access-bfzj8\") on node \"crc\" DevicePath \"\"" Feb 16 12:13:51 crc kubenswrapper[4949]: I0216 12:13:51.027063 4949 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/83b6a7c1-807f-4f19-b519-75879c54d0c5-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Feb 16 12:13:51 crc kubenswrapper[4949]: I0216 12:13:51.027138 4949 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/83b6a7c1-807f-4f19-b519-75879c54d0c5-inventory\") on node \"crc\" DevicePath \"\"" Feb 16 12:13:51 crc kubenswrapper[4949]: E0216 12:13:51.242345 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:13:51 crc kubenswrapper[4949]: I0216 12:13:51.395671 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rgqdj" event={"ID":"83b6a7c1-807f-4f19-b519-75879c54d0c5","Type":"ContainerDied","Data":"b47800ceee8e70d64b798ec52a195c98bb936820d87e1df2fd07dab951bda416"} Feb 16 12:13:51 crc kubenswrapper[4949]: I0216 12:13:51.395714 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b47800ceee8e70d64b798ec52a195c98bb936820d87e1df2fd07dab951bda416" Feb 16 12:13:51 crc kubenswrapper[4949]: I0216 12:13:51.395771 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rgqdj" Feb 16 12:14:04 crc kubenswrapper[4949]: E0216 12:14:04.237335 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:14:06 crc kubenswrapper[4949]: E0216 12:14:06.238096 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:14:18 crc kubenswrapper[4949]: E0216 12:14:18.237810 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:14:18 crc kubenswrapper[4949]: E0216 12:14:18.239581 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:14:31 crc kubenswrapper[4949]: E0216 12:14:31.255010 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:14:32 crc kubenswrapper[4949]: E0216 12:14:32.236530 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:14:44 crc kubenswrapper[4949]: E0216 12:14:44.237215 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:14:44 crc kubenswrapper[4949]: E0216 12:14:44.238394 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:14:55 crc kubenswrapper[4949]: E0216 12:14:55.237813 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:14:58 crc kubenswrapper[4949]: E0216 12:14:58.237495 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:15:00 crc kubenswrapper[4949]: I0216 12:15:00.169647 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29520735-d2x98"] Feb 16 12:15:00 crc kubenswrapper[4949]: E0216 12:15:00.170621 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83b6a7c1-807f-4f19-b519-75879c54d0c5" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Feb 16 12:15:00 crc kubenswrapper[4949]: I0216 12:15:00.170637 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="83b6a7c1-807f-4f19-b519-75879c54d0c5" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Feb 16 12:15:00 crc kubenswrapper[4949]: E0216 12:15:00.170662 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6de6074f-bf93-4ca6-8355-a5de759e7169" containerName="extract-utilities" Feb 16 12:15:00 crc kubenswrapper[4949]: I0216 12:15:00.170672 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="6de6074f-bf93-4ca6-8355-a5de759e7169" containerName="extract-utilities" Feb 16 12:15:00 crc kubenswrapper[4949]: E0216 12:15:00.170704 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6de6074f-bf93-4ca6-8355-a5de759e7169" containerName="extract-content" Feb 16 12:15:00 crc kubenswrapper[4949]: I0216 12:15:00.170713 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="6de6074f-bf93-4ca6-8355-a5de759e7169" containerName="extract-content" Feb 16 12:15:00 crc kubenswrapper[4949]: E0216 12:15:00.170725 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6de6074f-bf93-4ca6-8355-a5de759e7169" containerName="registry-server" Feb 16 12:15:00 crc kubenswrapper[4949]: I0216 12:15:00.170733 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="6de6074f-bf93-4ca6-8355-a5de759e7169" containerName="registry-server" Feb 16 12:15:00 crc kubenswrapper[4949]: I0216 12:15:00.170985 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="83b6a7c1-807f-4f19-b519-75879c54d0c5" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Feb 16 12:15:00 crc kubenswrapper[4949]: I0216 12:15:00.171010 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="6de6074f-bf93-4ca6-8355-a5de759e7169" containerName="registry-server" Feb 16 12:15:00 crc kubenswrapper[4949]: I0216 12:15:00.172059 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29520735-d2x98" Feb 16 12:15:00 crc kubenswrapper[4949]: I0216 12:15:00.174798 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Feb 16 12:15:00 crc kubenswrapper[4949]: I0216 12:15:00.174904 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Feb 16 12:15:00 crc kubenswrapper[4949]: I0216 12:15:00.190302 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29520735-d2x98"] Feb 16 12:15:00 crc kubenswrapper[4949]: I0216 12:15:00.268538 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tkwbn\" (UniqueName: \"kubernetes.io/projected/9d13f444-d9b8-47a9-9e8f-ef992b7575f6-kube-api-access-tkwbn\") pod \"collect-profiles-29520735-d2x98\" (UID: \"9d13f444-d9b8-47a9-9e8f-ef992b7575f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520735-d2x98" Feb 16 12:15:00 crc kubenswrapper[4949]: I0216 12:15:00.268889 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9d13f444-d9b8-47a9-9e8f-ef992b7575f6-secret-volume\") pod \"collect-profiles-29520735-d2x98\" (UID: \"9d13f444-d9b8-47a9-9e8f-ef992b7575f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520735-d2x98" Feb 16 12:15:00 crc kubenswrapper[4949]: I0216 12:15:00.269100 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9d13f444-d9b8-47a9-9e8f-ef992b7575f6-config-volume\") pod \"collect-profiles-29520735-d2x98\" (UID: \"9d13f444-d9b8-47a9-9e8f-ef992b7575f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520735-d2x98" Feb 16 12:15:00 crc kubenswrapper[4949]: I0216 12:15:00.371486 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tkwbn\" (UniqueName: \"kubernetes.io/projected/9d13f444-d9b8-47a9-9e8f-ef992b7575f6-kube-api-access-tkwbn\") pod \"collect-profiles-29520735-d2x98\" (UID: \"9d13f444-d9b8-47a9-9e8f-ef992b7575f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520735-d2x98" Feb 16 12:15:00 crc kubenswrapper[4949]: I0216 12:15:00.371556 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9d13f444-d9b8-47a9-9e8f-ef992b7575f6-secret-volume\") pod \"collect-profiles-29520735-d2x98\" (UID: \"9d13f444-d9b8-47a9-9e8f-ef992b7575f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520735-d2x98" Feb 16 12:15:00 crc kubenswrapper[4949]: I0216 12:15:00.371632 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9d13f444-d9b8-47a9-9e8f-ef992b7575f6-config-volume\") pod \"collect-profiles-29520735-d2x98\" (UID: \"9d13f444-d9b8-47a9-9e8f-ef992b7575f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520735-d2x98" Feb 16 12:15:00 crc kubenswrapper[4949]: I0216 12:15:00.372528 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9d13f444-d9b8-47a9-9e8f-ef992b7575f6-config-volume\") pod 
\"collect-profiles-29520735-d2x98\" (UID: \"9d13f444-d9b8-47a9-9e8f-ef992b7575f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520735-d2x98" Feb 16 12:15:00 crc kubenswrapper[4949]: I0216 12:15:00.381689 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9d13f444-d9b8-47a9-9e8f-ef992b7575f6-secret-volume\") pod \"collect-profiles-29520735-d2x98\" (UID: \"9d13f444-d9b8-47a9-9e8f-ef992b7575f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520735-d2x98" Feb 16 12:15:00 crc kubenswrapper[4949]: I0216 12:15:00.389549 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tkwbn\" (UniqueName: \"kubernetes.io/projected/9d13f444-d9b8-47a9-9e8f-ef992b7575f6-kube-api-access-tkwbn\") pod \"collect-profiles-29520735-d2x98\" (UID: \"9d13f444-d9b8-47a9-9e8f-ef992b7575f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520735-d2x98" Feb 16 12:15:00 crc kubenswrapper[4949]: I0216 12:15:00.507950 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29520735-d2x98" Feb 16 12:15:00 crc kubenswrapper[4949]: I0216 12:15:00.982673 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29520735-d2x98"] Feb 16 12:15:01 crc kubenswrapper[4949]: I0216 12:15:01.190460 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29520735-d2x98" event={"ID":"9d13f444-d9b8-47a9-9e8f-ef992b7575f6","Type":"ContainerStarted","Data":"7589aab6532b8a8beaa71f65ab93e021de2cafc9c7bbaf501b0f81aceac70bf4"} Feb 16 12:15:02 crc kubenswrapper[4949]: I0216 12:15:02.206445 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29520735-d2x98" event={"ID":"9d13f444-d9b8-47a9-9e8f-ef992b7575f6","Type":"ContainerStarted","Data":"1398d077678ad961993fe23c7e8bb29c8aeeced84eac9ac1525b6c3d77d2c42d"} Feb 16 12:15:02 crc kubenswrapper[4949]: I0216 12:15:02.231142 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29520735-d2x98" podStartSLOduration=2.231121273 podStartE2EDuration="2.231121273s" podCreationTimestamp="2026-02-16 12:15:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 12:15:02.219846601 +0000 UTC m=+4091.849180786" watchObservedRunningTime="2026-02-16 12:15:02.231121273 +0000 UTC m=+4091.860455438" Feb 16 12:15:03 crc kubenswrapper[4949]: I0216 12:15:03.221203 4949 generic.go:334] "Generic (PLEG): container finished" podID="9d13f444-d9b8-47a9-9e8f-ef992b7575f6" containerID="1398d077678ad961993fe23c7e8bb29c8aeeced84eac9ac1525b6c3d77d2c42d" exitCode=0 Feb 16 12:15:03 crc kubenswrapper[4949]: I0216 12:15:03.221280 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29520735-d2x98" event={"ID":"9d13f444-d9b8-47a9-9e8f-ef992b7575f6","Type":"ContainerDied","Data":"1398d077678ad961993fe23c7e8bb29c8aeeced84eac9ac1525b6c3d77d2c42d"} Feb 16 12:15:04 crc kubenswrapper[4949]: I0216 12:15:04.577289 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29520735-d2x98" Feb 16 12:15:04 crc kubenswrapper[4949]: I0216 12:15:04.689900 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9d13f444-d9b8-47a9-9e8f-ef992b7575f6-secret-volume\") pod \"9d13f444-d9b8-47a9-9e8f-ef992b7575f6\" (UID: \"9d13f444-d9b8-47a9-9e8f-ef992b7575f6\") " Feb 16 12:15:04 crc kubenswrapper[4949]: I0216 12:15:04.689973 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9d13f444-d9b8-47a9-9e8f-ef992b7575f6-config-volume\") pod \"9d13f444-d9b8-47a9-9e8f-ef992b7575f6\" (UID: \"9d13f444-d9b8-47a9-9e8f-ef992b7575f6\") " Feb 16 12:15:04 crc kubenswrapper[4949]: I0216 12:15:04.690161 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tkwbn\" (UniqueName: \"kubernetes.io/projected/9d13f444-d9b8-47a9-9e8f-ef992b7575f6-kube-api-access-tkwbn\") pod \"9d13f444-d9b8-47a9-9e8f-ef992b7575f6\" (UID: \"9d13f444-d9b8-47a9-9e8f-ef992b7575f6\") " Feb 16 12:15:04 crc kubenswrapper[4949]: I0216 12:15:04.691866 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d13f444-d9b8-47a9-9e8f-ef992b7575f6-config-volume" (OuterVolumeSpecName: "config-volume") pod "9d13f444-d9b8-47a9-9e8f-ef992b7575f6" (UID: "9d13f444-d9b8-47a9-9e8f-ef992b7575f6"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 12:15:04 crc kubenswrapper[4949]: I0216 12:15:04.697496 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d13f444-d9b8-47a9-9e8f-ef992b7575f6-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "9d13f444-d9b8-47a9-9e8f-ef992b7575f6" (UID: "9d13f444-d9b8-47a9-9e8f-ef992b7575f6"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 12:15:04 crc kubenswrapper[4949]: I0216 12:15:04.716014 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d13f444-d9b8-47a9-9e8f-ef992b7575f6-kube-api-access-tkwbn" (OuterVolumeSpecName: "kube-api-access-tkwbn") pod "9d13f444-d9b8-47a9-9e8f-ef992b7575f6" (UID: "9d13f444-d9b8-47a9-9e8f-ef992b7575f6"). InnerVolumeSpecName "kube-api-access-tkwbn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 12:15:04 crc kubenswrapper[4949]: I0216 12:15:04.794440 4949 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9d13f444-d9b8-47a9-9e8f-ef992b7575f6-config-volume\") on node \"crc\" DevicePath \"\"" Feb 16 12:15:04 crc kubenswrapper[4949]: I0216 12:15:04.794484 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tkwbn\" (UniqueName: \"kubernetes.io/projected/9d13f444-d9b8-47a9-9e8f-ef992b7575f6-kube-api-access-tkwbn\") on node \"crc\" DevicePath \"\"" Feb 16 12:15:04 crc kubenswrapper[4949]: I0216 12:15:04.794502 4949 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9d13f444-d9b8-47a9-9e8f-ef992b7575f6-secret-volume\") on node \"crc\" DevicePath \"\"" Feb 16 12:15:05 crc kubenswrapper[4949]: I0216 12:15:05.310033 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29520735-d2x98" event={"ID":"9d13f444-d9b8-47a9-9e8f-ef992b7575f6","Type":"ContainerDied","Data":"7589aab6532b8a8beaa71f65ab93e021de2cafc9c7bbaf501b0f81aceac70bf4"} Feb 16 12:15:05 crc kubenswrapper[4949]: I0216 12:15:05.310385 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7589aab6532b8a8beaa71f65ab93e021de2cafc9c7bbaf501b0f81aceac70bf4" Feb 16 12:15:05 crc kubenswrapper[4949]: I0216 12:15:05.310402 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29520735-d2x98" Feb 16 12:15:05 crc kubenswrapper[4949]: I0216 12:15:05.672519 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29520690-hwpp4"] Feb 16 12:15:05 crc kubenswrapper[4949]: I0216 12:15:05.684207 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29520690-hwpp4"] Feb 16 12:15:07 crc kubenswrapper[4949]: E0216 12:15:07.237087 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:15:07 crc kubenswrapper[4949]: I0216 12:15:07.248062 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cb474750-e885-4f04-b26c-fedb8cc342ce" path="/var/lib/kubelet/pods/cb474750-e885-4f04-b26c-fedb8cc342ce/volumes" Feb 16 12:15:09 crc kubenswrapper[4949]: E0216 12:15:09.261906 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:15:19 crc kubenswrapper[4949]: E0216 12:15:19.238706 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:15:23 crc kubenswrapper[4949]: E0216 
12:15:23.238758 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:15:33 crc kubenswrapper[4949]: E0216 12:15:33.238687 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:15:37 crc kubenswrapper[4949]: E0216 12:15:37.239846 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:15:44 crc kubenswrapper[4949]: E0216 12:15:44.238028 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:15:49 crc kubenswrapper[4949]: E0216 12:15:49.240560 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:15:59 crc kubenswrapper[4949]: E0216 12:15:59.238607 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:15:59 crc kubenswrapper[4949]: I0216 12:15:59.494600 4949 scope.go:117] "RemoveContainer" containerID="8662e3fc86f40863e7b490e1aa8d615df2b8a4cdd973bc218a0211a809448ffe" Feb 16 12:16:03 crc kubenswrapper[4949]: E0216 12:16:03.237568 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:16:04 crc kubenswrapper[4949]: I0216 12:16:04.550515 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 16 12:16:04 crc kubenswrapper[4949]: I0216 12:16:04.551489 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" 
containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 16 12:16:10 crc kubenswrapper[4949]: E0216 12:16:10.238325 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:16:16 crc kubenswrapper[4949]: E0216 12:16:16.239570 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:16:21 crc kubenswrapper[4949]: E0216 12:16:21.247529 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:16:29 crc kubenswrapper[4949]: I0216 12:16:29.041534 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-qgrk6"] Feb 16 12:16:29 crc kubenswrapper[4949]: E0216 12:16:29.042507 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d13f444-d9b8-47a9-9e8f-ef992b7575f6" containerName="collect-profiles" Feb 16 12:16:29 crc kubenswrapper[4949]: I0216 12:16:29.042521 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d13f444-d9b8-47a9-9e8f-ef992b7575f6" containerName="collect-profiles" Feb 16 12:16:29 crc kubenswrapper[4949]: I0216 12:16:29.042757 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d13f444-d9b8-47a9-9e8f-ef992b7575f6" containerName="collect-profiles" Feb 16 12:16:29 crc kubenswrapper[4949]: I0216 12:16:29.043551 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-qgrk6" Feb 16 12:16:29 crc kubenswrapper[4949]: I0216 12:16:29.047279 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Feb 16 12:16:29 crc kubenswrapper[4949]: I0216 12:16:29.047378 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Feb 16 12:16:29 crc kubenswrapper[4949]: I0216 12:16:29.047636 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-g89vn" Feb 16 12:16:29 crc kubenswrapper[4949]: I0216 12:16:29.048330 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Feb 16 12:16:29 crc kubenswrapper[4949]: I0216 12:16:29.077095 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-qgrk6"] Feb 16 12:16:29 crc kubenswrapper[4949]: I0216 12:16:29.137059 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b4b77ec2-a1cb-437b-86a9-a9554e316035-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-qgrk6\" (UID: \"b4b77ec2-a1cb-437b-86a9-a9554e316035\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-qgrk6" Feb 16 12:16:29 crc kubenswrapper[4949]: I0216 12:16:29.137131 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b4b77ec2-a1cb-437b-86a9-a9554e316035-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-qgrk6\" (UID: \"b4b77ec2-a1cb-437b-86a9-a9554e316035\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-qgrk6" Feb 16 12:16:29 crc kubenswrapper[4949]: I0216 12:16:29.137160 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9w9p5\" (UniqueName: \"kubernetes.io/projected/b4b77ec2-a1cb-437b-86a9-a9554e316035-kube-api-access-9w9p5\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-qgrk6\" (UID: \"b4b77ec2-a1cb-437b-86a9-a9554e316035\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-qgrk6" Feb 16 12:16:29 crc kubenswrapper[4949]: I0216 12:16:29.240933 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b4b77ec2-a1cb-437b-86a9-a9554e316035-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-qgrk6\" (UID: \"b4b77ec2-a1cb-437b-86a9-a9554e316035\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-qgrk6" Feb 16 12:16:29 crc kubenswrapper[4949]: I0216 12:16:29.241028 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b4b77ec2-a1cb-437b-86a9-a9554e316035-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-qgrk6\" (UID: \"b4b77ec2-a1cb-437b-86a9-a9554e316035\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-qgrk6" Feb 16 12:16:29 crc kubenswrapper[4949]: I0216 12:16:29.241072 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9w9p5\" (UniqueName: 
\"kubernetes.io/projected/b4b77ec2-a1cb-437b-86a9-a9554e316035-kube-api-access-9w9p5\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-qgrk6\" (UID: \"b4b77ec2-a1cb-437b-86a9-a9554e316035\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-qgrk6" Feb 16 12:16:29 crc kubenswrapper[4949]: I0216 12:16:29.251504 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b4b77ec2-a1cb-437b-86a9-a9554e316035-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-qgrk6\" (UID: \"b4b77ec2-a1cb-437b-86a9-a9554e316035\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-qgrk6" Feb 16 12:16:29 crc kubenswrapper[4949]: I0216 12:16:29.268345 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b4b77ec2-a1cb-437b-86a9-a9554e316035-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-qgrk6\" (UID: \"b4b77ec2-a1cb-437b-86a9-a9554e316035\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-qgrk6" Feb 16 12:16:29 crc kubenswrapper[4949]: I0216 12:16:29.278715 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9w9p5\" (UniqueName: \"kubernetes.io/projected/b4b77ec2-a1cb-437b-86a9-a9554e316035-kube-api-access-9w9p5\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-qgrk6\" (UID: \"b4b77ec2-a1cb-437b-86a9-a9554e316035\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-qgrk6" Feb 16 12:16:29 crc kubenswrapper[4949]: I0216 12:16:29.386009 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-qgrk6" Feb 16 12:16:29 crc kubenswrapper[4949]: I0216 12:16:29.948573 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-qgrk6"] Feb 16 12:16:30 crc kubenswrapper[4949]: E0216 12:16:30.238245 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:16:30 crc kubenswrapper[4949]: I0216 12:16:30.418999 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-qgrk6" event={"ID":"b4b77ec2-a1cb-437b-86a9-a9554e316035","Type":"ContainerStarted","Data":"3d524c6182a54ffb555c15bfaf6dcf283327ddf2dc933f5b6f7a6cffaad5fca7"} Feb 16 12:16:31 crc kubenswrapper[4949]: I0216 12:16:31.430834 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-qgrk6" event={"ID":"b4b77ec2-a1cb-437b-86a9-a9554e316035","Type":"ContainerStarted","Data":"e6184ac4b74ecb21cb0207c86b63ba6c969df2451fb3aca355fd9c083236056b"} Feb 16 12:16:31 crc kubenswrapper[4949]: I0216 12:16:31.454368 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-qgrk6" podStartSLOduration=1.835176608 podStartE2EDuration="2.454350985s" podCreationTimestamp="2026-02-16 12:16:29 +0000 UTC" firstStartedPulling="2026-02-16 12:16:29.945695571 +0000 UTC m=+4179.575029736" lastFinishedPulling="2026-02-16 
12:16:30.564869958 +0000 UTC m=+4180.194204113" observedRunningTime="2026-02-16 12:16:31.446047648 +0000 UTC m=+4181.075381853" watchObservedRunningTime="2026-02-16 12:16:31.454350985 +0000 UTC m=+4181.083685150" Feb 16 12:16:34 crc kubenswrapper[4949]: I0216 12:16:34.550033 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 16 12:16:34 crc kubenswrapper[4949]: I0216 12:16:34.550686 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 16 12:16:36 crc kubenswrapper[4949]: E0216 12:16:36.237438 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:16:43 crc kubenswrapper[4949]: E0216 12:16:43.237621 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:16:48 crc kubenswrapper[4949]: I0216 12:16:48.237956 4949 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 16 12:16:48 crc kubenswrapper[4949]: E0216 12:16:48.370646 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 16 12:16:48 crc kubenswrapper[4949]: E0216 12:16:48.370709 4949 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 16 12:16:48 crc kubenswrapper[4949]: E0216 12:16:48.370852 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n674h5dh7bh65bhcch65chc4h547h5d4h5c7h5dch5c8h74hb9h5f4hd8h79h7h59bh559h56bh9bhbch67bh68bh575h5cbh658h5bch7bhcch5d9q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8k7p7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(c69d7379-6f2b-45ae-8972-71e223a337a8): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 16 12:16:48 crc kubenswrapper[4949]: E0216 12:16:48.372125 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:16:58 crc kubenswrapper[4949]: E0216 12:16:58.341530 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 16 12:16:58 crc kubenswrapper[4949]: E0216 12:16:58.342054 4949 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 16 12:16:58 crc kubenswrapper[4949]: E0216 12:16:58.342187 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ksbml,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-5lgds_openstack(a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest 
current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 16 12:16:58 crc kubenswrapper[4949]: E0216 12:16:58.343407 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:17:00 crc kubenswrapper[4949]: E0216 12:17:00.240249 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:17:04 crc kubenswrapper[4949]: I0216 12:17:04.550440 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 16 12:17:04 crc kubenswrapper[4949]: I0216 12:17:04.550994 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 16 12:17:04 crc kubenswrapper[4949]: I0216 12:17:04.551045 4949 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-26lss" Feb 16 12:17:04 crc kubenswrapper[4949]: I0216 12:17:04.551977 4949 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"74581081df8341579e9c4de46399b9d90a92c0b8a2652976dc69519a6af60f6c"} pod="openshift-machine-config-operator/machine-config-daemon-26lss" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 16 12:17:04 crc kubenswrapper[4949]: I0216 12:17:04.552049 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" containerID="cri-o://74581081df8341579e9c4de46399b9d90a92c0b8a2652976dc69519a6af60f6c" gracePeriod=600 Feb 16 12:17:04 crc kubenswrapper[4949]: I0216 12:17:04.827604 4949 generic.go:334] "Generic (PLEG): container finished" podID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerID="74581081df8341579e9c4de46399b9d90a92c0b8a2652976dc69519a6af60f6c" exitCode=0 Feb 16 12:17:04 crc kubenswrapper[4949]: I0216 12:17:04.827682 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" 
event={"ID":"39ca5ab7-457c-4404-a3eb-f6acce74843b","Type":"ContainerDied","Data":"74581081df8341579e9c4de46399b9d90a92c0b8a2652976dc69519a6af60f6c"} Feb 16 12:17:04 crc kubenswrapper[4949]: I0216 12:17:04.828019 4949 scope.go:117] "RemoveContainer" containerID="c94c7ff085a1acabde918de5332cbd7f8a47e539952a8c199c48fa312cdccc2f" Feb 16 12:17:05 crc kubenswrapper[4949]: I0216 12:17:05.843031 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" event={"ID":"39ca5ab7-457c-4404-a3eb-f6acce74843b","Type":"ContainerStarted","Data":"aface388741cbe2a3499be5b34861516246186ab0c0bbc522e7ee0c88862d5e3"} Feb 16 12:17:12 crc kubenswrapper[4949]: E0216 12:17:12.238219 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:17:12 crc kubenswrapper[4949]: E0216 12:17:12.239124 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:17:25 crc kubenswrapper[4949]: E0216 12:17:25.238641 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:17:27 crc kubenswrapper[4949]: E0216 12:17:27.240467 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:17:38 crc kubenswrapper[4949]: E0216 12:17:38.239138 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:17:41 crc kubenswrapper[4949]: E0216 12:17:41.245418 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:17:52 crc kubenswrapper[4949]: E0216 12:17:52.237566 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:17:52 crc kubenswrapper[4949]: E0216 12:17:52.237644 
4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:18:04 crc kubenswrapper[4949]: E0216 12:18:04.239613 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:18:07 crc kubenswrapper[4949]: E0216 12:18:07.237125 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:18:18 crc kubenswrapper[4949]: E0216 12:18:18.237449 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:18:19 crc kubenswrapper[4949]: E0216 12:18:19.250265 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:18:32 crc kubenswrapper[4949]: E0216 12:18:32.238624 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:18:33 crc kubenswrapper[4949]: E0216 12:18:33.238718 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:18:44 crc kubenswrapper[4949]: E0216 12:18:44.238860 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:18:45 crc kubenswrapper[4949]: E0216 12:18:45.239181 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" 
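
The two error records repeating every 10-15 seconds above are the kubelet's image-pull back-off loop: openstack/ceilometer-0 and openstack/heat-db-sync-5lgds both reference the quay.rdoproject.org tag current-tested, the registry reports that the tag "was deleted or has expired", and so each real pull attempt (12:16:48 and 12:16:58 above) fails with ErrImagePull and the pods re-enter ImagePullBackOff. A minimal sketch for summarizing this churn from a saved copy of the journal follows; the script and its field handling are illustrative, not part of any cluster tooling:

    #!/usr/bin/env python3
    # Hypothetical helper: count ImagePullBackOff "Error syncing pod" records
    # per pod in a kubelet journal saved as plain text. Assumes each record
    # keeps "ImagePullBackOff" and its pod="..." field on one line; records
    # re-wrapped across lines (as in some extractions) may be undercounted.
    import re
    import sys
    from collections import defaultdict

    PAT = re.compile(r'ImagePullBackOff.*?pod="(?P<pod>[^"]+)"')

    def summarize(path):
        counts = defaultdict(int)
        with open(path, encoding="utf-8", errors="replace") as fh:
            for line in fh:
                m = PAT.search(line)
                if m:
                    counts[m.group("pod")] += 1
        for pod, n in sorted(counts.items(), key=lambda kv: -kv[1]):
            print(f"{n:5d}  {pod}")

    if __name__ == "__main__":
        summarize(sys.argv[1] if len(sys.argv) > 1 else "/dev/stdin")

Run as, e.g., python3 summarize_backoff.py kubelet.log; on this stretch of the journal it should report only the two openstack pods named above.
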
Feb 16 12:18:57 crc kubenswrapper[4949]: E0216 12:18:57.238438 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:18:57 crc kubenswrapper[4949]: E0216 12:18:57.239056 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:19:04 crc kubenswrapper[4949]: I0216 12:19:04.550773 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 16 12:19:04 crc kubenswrapper[4949]: I0216 12:19:04.551411 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 16 12:19:08 crc kubenswrapper[4949]: E0216 12:19:08.236907 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:19:09 crc kubenswrapper[4949]: E0216 12:19:09.240730 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:19:20 crc kubenswrapper[4949]: E0216 12:19:20.240895 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:19:24 crc kubenswrapper[4949]: E0216 12:19:24.238714 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:19:34 crc kubenswrapper[4949]: I0216 12:19:34.550349 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 16 12:19:34 crc 
kubenswrapper[4949]: I0216 12:19:34.551428 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 16 12:19:35 crc kubenswrapper[4949]: E0216 12:19:35.238407 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:19:39 crc kubenswrapper[4949]: E0216 12:19:39.268072 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:19:41 crc kubenswrapper[4949]: I0216 12:19:41.025998 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-jcffq"] Feb 16 12:19:41 crc kubenswrapper[4949]: I0216 12:19:41.031002 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jcffq" Feb 16 12:19:41 crc kubenswrapper[4949]: I0216 12:19:41.099432 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jcffq"] Feb 16 12:19:41 crc kubenswrapper[4949]: I0216 12:19:41.131384 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24d57688-6e83-41c1-b4c3-8295a3692a1a-catalog-content\") pod \"redhat-marketplace-jcffq\" (UID: \"24d57688-6e83-41c1-b4c3-8295a3692a1a\") " pod="openshift-marketplace/redhat-marketplace-jcffq" Feb 16 12:19:41 crc kubenswrapper[4949]: I0216 12:19:41.131990 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hvttx\" (UniqueName: \"kubernetes.io/projected/24d57688-6e83-41c1-b4c3-8295a3692a1a-kube-api-access-hvttx\") pod \"redhat-marketplace-jcffq\" (UID: \"24d57688-6e83-41c1-b4c3-8295a3692a1a\") " pod="openshift-marketplace/redhat-marketplace-jcffq" Feb 16 12:19:41 crc kubenswrapper[4949]: I0216 12:19:41.132071 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24d57688-6e83-41c1-b4c3-8295a3692a1a-utilities\") pod \"redhat-marketplace-jcffq\" (UID: \"24d57688-6e83-41c1-b4c3-8295a3692a1a\") " pod="openshift-marketplace/redhat-marketplace-jcffq" Feb 16 12:19:41 crc kubenswrapper[4949]: I0216 12:19:41.234646 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hvttx\" (UniqueName: \"kubernetes.io/projected/24d57688-6e83-41c1-b4c3-8295a3692a1a-kube-api-access-hvttx\") pod \"redhat-marketplace-jcffq\" (UID: \"24d57688-6e83-41c1-b4c3-8295a3692a1a\") " pod="openshift-marketplace/redhat-marketplace-jcffq" Feb 16 12:19:41 crc kubenswrapper[4949]: I0216 12:19:41.234698 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/24d57688-6e83-41c1-b4c3-8295a3692a1a-utilities\") pod \"redhat-marketplace-jcffq\" (UID: \"24d57688-6e83-41c1-b4c3-8295a3692a1a\") " pod="openshift-marketplace/redhat-marketplace-jcffq" Feb 16 12:19:41 crc kubenswrapper[4949]: I0216 12:19:41.234779 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24d57688-6e83-41c1-b4c3-8295a3692a1a-catalog-content\") pod \"redhat-marketplace-jcffq\" (UID: \"24d57688-6e83-41c1-b4c3-8295a3692a1a\") " pod="openshift-marketplace/redhat-marketplace-jcffq" Feb 16 12:19:41 crc kubenswrapper[4949]: I0216 12:19:41.235357 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24d57688-6e83-41c1-b4c3-8295a3692a1a-utilities\") pod \"redhat-marketplace-jcffq\" (UID: \"24d57688-6e83-41c1-b4c3-8295a3692a1a\") " pod="openshift-marketplace/redhat-marketplace-jcffq" Feb 16 12:19:41 crc kubenswrapper[4949]: I0216 12:19:41.235402 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24d57688-6e83-41c1-b4c3-8295a3692a1a-catalog-content\") pod \"redhat-marketplace-jcffq\" (UID: \"24d57688-6e83-41c1-b4c3-8295a3692a1a\") " pod="openshift-marketplace/redhat-marketplace-jcffq" Feb 16 12:19:41 crc kubenswrapper[4949]: I0216 12:19:41.254245 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hvttx\" (UniqueName: \"kubernetes.io/projected/24d57688-6e83-41c1-b4c3-8295a3692a1a-kube-api-access-hvttx\") pod \"redhat-marketplace-jcffq\" (UID: \"24d57688-6e83-41c1-b4c3-8295a3692a1a\") " pod="openshift-marketplace/redhat-marketplace-jcffq" Feb 16 12:19:41 crc kubenswrapper[4949]: I0216 12:19:41.408883 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jcffq" Feb 16 12:19:41 crc kubenswrapper[4949]: I0216 12:19:41.922422 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jcffq"] Feb 16 12:19:42 crc kubenswrapper[4949]: I0216 12:19:42.788368 4949 generic.go:334] "Generic (PLEG): container finished" podID="24d57688-6e83-41c1-b4c3-8295a3692a1a" containerID="3c041502bfbbe0bc0c84cdf7d399b84a7ad1dd9c2d0e61b57f82625f6687b7d6" exitCode=0 Feb 16 12:19:42 crc kubenswrapper[4949]: I0216 12:19:42.788485 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jcffq" event={"ID":"24d57688-6e83-41c1-b4c3-8295a3692a1a","Type":"ContainerDied","Data":"3c041502bfbbe0bc0c84cdf7d399b84a7ad1dd9c2d0e61b57f82625f6687b7d6"} Feb 16 12:19:42 crc kubenswrapper[4949]: I0216 12:19:42.788684 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jcffq" event={"ID":"24d57688-6e83-41c1-b4c3-8295a3692a1a","Type":"ContainerStarted","Data":"9c25543f81e4d1e54b25bc5a270464b1dc09330c793bf7640d6c3152e607ba77"} Feb 16 12:19:43 crc kubenswrapper[4949]: I0216 12:19:43.797901 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jcffq" event={"ID":"24d57688-6e83-41c1-b4c3-8295a3692a1a","Type":"ContainerStarted","Data":"8b5f1c7ddd2d81ff46ed8654d4d74e8935c71d68ed3521ba51c8743f915acedd"} Feb 16 12:19:44 crc kubenswrapper[4949]: I0216 12:19:44.809442 4949 generic.go:334] "Generic (PLEG): container finished" podID="24d57688-6e83-41c1-b4c3-8295a3692a1a" containerID="8b5f1c7ddd2d81ff46ed8654d4d74e8935c71d68ed3521ba51c8743f915acedd" exitCode=0 Feb 16 12:19:44 crc kubenswrapper[4949]: I0216 12:19:44.809506 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jcffq" event={"ID":"24d57688-6e83-41c1-b4c3-8295a3692a1a","Type":"ContainerDied","Data":"8b5f1c7ddd2d81ff46ed8654d4d74e8935c71d68ed3521ba51c8743f915acedd"} Feb 16 12:19:46 crc kubenswrapper[4949]: I0216 12:19:46.838325 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jcffq" event={"ID":"24d57688-6e83-41c1-b4c3-8295a3692a1a","Type":"ContainerStarted","Data":"b6ec0cf12e838eb1e5c01bef35384a20ab353385bdf6b22373a79a7a6e6ad4f2"} Feb 16 12:19:46 crc kubenswrapper[4949]: I0216 12:19:46.861903 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-jcffq" podStartSLOduration=3.108141502 podStartE2EDuration="6.861885123s" podCreationTimestamp="2026-02-16 12:19:40 +0000 UTC" firstStartedPulling="2026-02-16 12:19:42.791641646 +0000 UTC m=+4372.420975821" lastFinishedPulling="2026-02-16 12:19:46.545385237 +0000 UTC m=+4376.174719442" observedRunningTime="2026-02-16 12:19:46.861563374 +0000 UTC m=+4376.490897559" watchObservedRunningTime="2026-02-16 12:19:46.861885123 +0000 UTC m=+4376.491219288" Feb 16 12:19:49 crc kubenswrapper[4949]: E0216 12:19:49.239399 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:19:51 crc kubenswrapper[4949]: I0216 12:19:51.409012 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/redhat-marketplace-jcffq" Feb 16 12:19:51 crc kubenswrapper[4949]: I0216 12:19:51.411487 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-jcffq" Feb 16 12:19:51 crc kubenswrapper[4949]: I0216 12:19:51.489515 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-jcffq" Feb 16 12:19:51 crc kubenswrapper[4949]: I0216 12:19:51.945789 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-jcffq" Feb 16 12:19:52 crc kubenswrapper[4949]: I0216 12:19:52.010342 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jcffq"] Feb 16 12:19:53 crc kubenswrapper[4949]: E0216 12:19:53.236734 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:19:53 crc kubenswrapper[4949]: I0216 12:19:53.915637 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-jcffq" podUID="24d57688-6e83-41c1-b4c3-8295a3692a1a" containerName="registry-server" containerID="cri-o://b6ec0cf12e838eb1e5c01bef35384a20ab353385bdf6b22373a79a7a6e6ad4f2" gracePeriod=2 Feb 16 12:19:54 crc kubenswrapper[4949]: I0216 12:19:54.525278 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jcffq" Feb 16 12:19:54 crc kubenswrapper[4949]: I0216 12:19:54.726157 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24d57688-6e83-41c1-b4c3-8295a3692a1a-catalog-content\") pod \"24d57688-6e83-41c1-b4c3-8295a3692a1a\" (UID: \"24d57688-6e83-41c1-b4c3-8295a3692a1a\") " Feb 16 12:19:54 crc kubenswrapper[4949]: I0216 12:19:54.726241 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hvttx\" (UniqueName: \"kubernetes.io/projected/24d57688-6e83-41c1-b4c3-8295a3692a1a-kube-api-access-hvttx\") pod \"24d57688-6e83-41c1-b4c3-8295a3692a1a\" (UID: \"24d57688-6e83-41c1-b4c3-8295a3692a1a\") " Feb 16 12:19:54 crc kubenswrapper[4949]: I0216 12:19:54.726407 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24d57688-6e83-41c1-b4c3-8295a3692a1a-utilities\") pod \"24d57688-6e83-41c1-b4c3-8295a3692a1a\" (UID: \"24d57688-6e83-41c1-b4c3-8295a3692a1a\") " Feb 16 12:19:54 crc kubenswrapper[4949]: I0216 12:19:54.727044 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/24d57688-6e83-41c1-b4c3-8295a3692a1a-utilities" (OuterVolumeSpecName: "utilities") pod "24d57688-6e83-41c1-b4c3-8295a3692a1a" (UID: "24d57688-6e83-41c1-b4c3-8295a3692a1a"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 12:19:54 crc kubenswrapper[4949]: I0216 12:19:54.735861 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/24d57688-6e83-41c1-b4c3-8295a3692a1a-kube-api-access-hvttx" (OuterVolumeSpecName: "kube-api-access-hvttx") pod "24d57688-6e83-41c1-b4c3-8295a3692a1a" (UID: "24d57688-6e83-41c1-b4c3-8295a3692a1a"). InnerVolumeSpecName "kube-api-access-hvttx". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 12:19:54 crc kubenswrapper[4949]: I0216 12:19:54.749686 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/24d57688-6e83-41c1-b4c3-8295a3692a1a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "24d57688-6e83-41c1-b4c3-8295a3692a1a" (UID: "24d57688-6e83-41c1-b4c3-8295a3692a1a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 12:19:54 crc kubenswrapper[4949]: I0216 12:19:54.829308 4949 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24d57688-6e83-41c1-b4c3-8295a3692a1a-utilities\") on node \"crc\" DevicePath \"\"" Feb 16 12:19:54 crc kubenswrapper[4949]: I0216 12:19:54.829351 4949 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24d57688-6e83-41c1-b4c3-8295a3692a1a-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 16 12:19:54 crc kubenswrapper[4949]: I0216 12:19:54.829366 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hvttx\" (UniqueName: \"kubernetes.io/projected/24d57688-6e83-41c1-b4c3-8295a3692a1a-kube-api-access-hvttx\") on node \"crc\" DevicePath \"\"" Feb 16 12:19:54 crc kubenswrapper[4949]: I0216 12:19:54.949807 4949 generic.go:334] "Generic (PLEG): container finished" podID="24d57688-6e83-41c1-b4c3-8295a3692a1a" containerID="b6ec0cf12e838eb1e5c01bef35384a20ab353385bdf6b22373a79a7a6e6ad4f2" exitCode=0 Feb 16 12:19:54 crc kubenswrapper[4949]: I0216 12:19:54.949885 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jcffq" event={"ID":"24d57688-6e83-41c1-b4c3-8295a3692a1a","Type":"ContainerDied","Data":"b6ec0cf12e838eb1e5c01bef35384a20ab353385bdf6b22373a79a7a6e6ad4f2"} Feb 16 12:19:54 crc kubenswrapper[4949]: I0216 12:19:54.949922 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jcffq" event={"ID":"24d57688-6e83-41c1-b4c3-8295a3692a1a","Type":"ContainerDied","Data":"9c25543f81e4d1e54b25bc5a270464b1dc09330c793bf7640d6c3152e607ba77"} Feb 16 12:19:54 crc kubenswrapper[4949]: I0216 12:19:54.949956 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jcffq" Feb 16 12:19:54 crc kubenswrapper[4949]: I0216 12:19:54.949951 4949 scope.go:117] "RemoveContainer" containerID="b6ec0cf12e838eb1e5c01bef35384a20ab353385bdf6b22373a79a7a6e6ad4f2" Feb 16 12:19:54 crc kubenswrapper[4949]: I0216 12:19:54.983968 4949 scope.go:117] "RemoveContainer" containerID="8b5f1c7ddd2d81ff46ed8654d4d74e8935c71d68ed3521ba51c8743f915acedd" Feb 16 12:19:55 crc kubenswrapper[4949]: I0216 12:19:55.021726 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jcffq"] Feb 16 12:19:55 crc kubenswrapper[4949]: I0216 12:19:55.043474 4949 scope.go:117] "RemoveContainer" containerID="3c041502bfbbe0bc0c84cdf7d399b84a7ad1dd9c2d0e61b57f82625f6687b7d6" Feb 16 12:19:55 crc kubenswrapper[4949]: I0216 12:19:55.044142 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-jcffq"] Feb 16 12:19:55 crc kubenswrapper[4949]: I0216 12:19:55.089751 4949 scope.go:117] "RemoveContainer" containerID="b6ec0cf12e838eb1e5c01bef35384a20ab353385bdf6b22373a79a7a6e6ad4f2" Feb 16 12:19:55 crc kubenswrapper[4949]: E0216 12:19:55.090380 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b6ec0cf12e838eb1e5c01bef35384a20ab353385bdf6b22373a79a7a6e6ad4f2\": container with ID starting with b6ec0cf12e838eb1e5c01bef35384a20ab353385bdf6b22373a79a7a6e6ad4f2 not found: ID does not exist" containerID="b6ec0cf12e838eb1e5c01bef35384a20ab353385bdf6b22373a79a7a6e6ad4f2" Feb 16 12:19:55 crc kubenswrapper[4949]: I0216 12:19:55.090420 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b6ec0cf12e838eb1e5c01bef35384a20ab353385bdf6b22373a79a7a6e6ad4f2"} err="failed to get container status \"b6ec0cf12e838eb1e5c01bef35384a20ab353385bdf6b22373a79a7a6e6ad4f2\": rpc error: code = NotFound desc = could not find container \"b6ec0cf12e838eb1e5c01bef35384a20ab353385bdf6b22373a79a7a6e6ad4f2\": container with ID starting with b6ec0cf12e838eb1e5c01bef35384a20ab353385bdf6b22373a79a7a6e6ad4f2 not found: ID does not exist" Feb 16 12:19:55 crc kubenswrapper[4949]: I0216 12:19:55.090474 4949 scope.go:117] "RemoveContainer" containerID="8b5f1c7ddd2d81ff46ed8654d4d74e8935c71d68ed3521ba51c8743f915acedd" Feb 16 12:19:55 crc kubenswrapper[4949]: E0216 12:19:55.090958 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8b5f1c7ddd2d81ff46ed8654d4d74e8935c71d68ed3521ba51c8743f915acedd\": container with ID starting with 8b5f1c7ddd2d81ff46ed8654d4d74e8935c71d68ed3521ba51c8743f915acedd not found: ID does not exist" containerID="8b5f1c7ddd2d81ff46ed8654d4d74e8935c71d68ed3521ba51c8743f915acedd" Feb 16 12:19:55 crc kubenswrapper[4949]: I0216 12:19:55.091000 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8b5f1c7ddd2d81ff46ed8654d4d74e8935c71d68ed3521ba51c8743f915acedd"} err="failed to get container status \"8b5f1c7ddd2d81ff46ed8654d4d74e8935c71d68ed3521ba51c8743f915acedd\": rpc error: code = NotFound desc = could not find container \"8b5f1c7ddd2d81ff46ed8654d4d74e8935c71d68ed3521ba51c8743f915acedd\": container with ID starting with 8b5f1c7ddd2d81ff46ed8654d4d74e8935c71d68ed3521ba51c8743f915acedd not found: ID does not exist" Feb 16 12:19:55 crc kubenswrapper[4949]: I0216 12:19:55.091026 4949 scope.go:117] "RemoveContainer" 
containerID="3c041502bfbbe0bc0c84cdf7d399b84a7ad1dd9c2d0e61b57f82625f6687b7d6" Feb 16 12:19:55 crc kubenswrapper[4949]: E0216 12:19:55.091437 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3c041502bfbbe0bc0c84cdf7d399b84a7ad1dd9c2d0e61b57f82625f6687b7d6\": container with ID starting with 3c041502bfbbe0bc0c84cdf7d399b84a7ad1dd9c2d0e61b57f82625f6687b7d6 not found: ID does not exist" containerID="3c041502bfbbe0bc0c84cdf7d399b84a7ad1dd9c2d0e61b57f82625f6687b7d6" Feb 16 12:19:55 crc kubenswrapper[4949]: I0216 12:19:55.091452 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c041502bfbbe0bc0c84cdf7d399b84a7ad1dd9c2d0e61b57f82625f6687b7d6"} err="failed to get container status \"3c041502bfbbe0bc0c84cdf7d399b84a7ad1dd9c2d0e61b57f82625f6687b7d6\": rpc error: code = NotFound desc = could not find container \"3c041502bfbbe0bc0c84cdf7d399b84a7ad1dd9c2d0e61b57f82625f6687b7d6\": container with ID starting with 3c041502bfbbe0bc0c84cdf7d399b84a7ad1dd9c2d0e61b57f82625f6687b7d6 not found: ID does not exist" Feb 16 12:19:55 crc kubenswrapper[4949]: I0216 12:19:55.264433 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="24d57688-6e83-41c1-b4c3-8295a3692a1a" path="/var/lib/kubelet/pods/24d57688-6e83-41c1-b4c3-8295a3692a1a/volumes" Feb 16 12:20:02 crc kubenswrapper[4949]: E0216 12:20:02.238439 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:20:04 crc kubenswrapper[4949]: I0216 12:20:04.550793 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 16 12:20:04 crc kubenswrapper[4949]: I0216 12:20:04.551074 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 16 12:20:04 crc kubenswrapper[4949]: I0216 12:20:04.551117 4949 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-26lss" Feb 16 12:20:04 crc kubenswrapper[4949]: I0216 12:20:04.552048 4949 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"aface388741cbe2a3499be5b34861516246186ab0c0bbc522e7ee0c88862d5e3"} pod="openshift-machine-config-operator/machine-config-daemon-26lss" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 16 12:20:04 crc kubenswrapper[4949]: I0216 12:20:04.552107 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" 
containerID="cri-o://aface388741cbe2a3499be5b34861516246186ab0c0bbc522e7ee0c88862d5e3" gracePeriod=600 Feb 16 12:20:04 crc kubenswrapper[4949]: E0216 12:20:04.690676 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:20:05 crc kubenswrapper[4949]: I0216 12:20:05.097159 4949 generic.go:334] "Generic (PLEG): container finished" podID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerID="aface388741cbe2a3499be5b34861516246186ab0c0bbc522e7ee0c88862d5e3" exitCode=0 Feb 16 12:20:05 crc kubenswrapper[4949]: I0216 12:20:05.097224 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" event={"ID":"39ca5ab7-457c-4404-a3eb-f6acce74843b","Type":"ContainerDied","Data":"aface388741cbe2a3499be5b34861516246186ab0c0bbc522e7ee0c88862d5e3"} Feb 16 12:20:05 crc kubenswrapper[4949]: I0216 12:20:05.097533 4949 scope.go:117] "RemoveContainer" containerID="74581081df8341579e9c4de46399b9d90a92c0b8a2652976dc69519a6af60f6c" Feb 16 12:20:05 crc kubenswrapper[4949]: I0216 12:20:05.099332 4949 scope.go:117] "RemoveContainer" containerID="aface388741cbe2a3499be5b34861516246186ab0c0bbc522e7ee0c88862d5e3" Feb 16 12:20:05 crc kubenswrapper[4949]: E0216 12:20:05.100295 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:20:08 crc kubenswrapper[4949]: E0216 12:20:08.240759 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:20:13 crc kubenswrapper[4949]: E0216 12:20:13.240868 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:20:20 crc kubenswrapper[4949]: I0216 12:20:20.236316 4949 scope.go:117] "RemoveContainer" containerID="aface388741cbe2a3499be5b34861516246186ab0c0bbc522e7ee0c88862d5e3" Feb 16 12:20:20 crc kubenswrapper[4949]: E0216 12:20:20.237240 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:20:20 crc 
kubenswrapper[4949]: E0216 12:20:20.240332 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:20:27 crc kubenswrapper[4949]: E0216 12:20:27.238302 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:20:32 crc kubenswrapper[4949]: E0216 12:20:32.237577 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:20:34 crc kubenswrapper[4949]: I0216 12:20:34.235155 4949 scope.go:117] "RemoveContainer" containerID="aface388741cbe2a3499be5b34861516246186ab0c0bbc522e7ee0c88862d5e3" Feb 16 12:20:34 crc kubenswrapper[4949]: E0216 12:20:34.235998 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:20:41 crc kubenswrapper[4949]: E0216 12:20:41.257678 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:20:45 crc kubenswrapper[4949]: E0216 12:20:45.239679 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:20:47 crc kubenswrapper[4949]: I0216 12:20:47.237692 4949 scope.go:117] "RemoveContainer" containerID="aface388741cbe2a3499be5b34861516246186ab0c0bbc522e7ee0c88862d5e3" Feb 16 12:20:47 crc kubenswrapper[4949]: E0216 12:20:47.240593 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:20:56 crc kubenswrapper[4949]: E0216 12:20:56.238830 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:20:56 crc kubenswrapper[4949]: E0216 12:20:56.239156 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:20:59 crc kubenswrapper[4949]: I0216 12:20:59.236268 4949 scope.go:117] "RemoveContainer" containerID="aface388741cbe2a3499be5b34861516246186ab0c0bbc522e7ee0c88862d5e3" Feb 16 12:20:59 crc kubenswrapper[4949]: E0216 12:20:59.237747 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:21:08 crc kubenswrapper[4949]: E0216 12:21:08.241253 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:21:11 crc kubenswrapper[4949]: E0216 12:21:11.253237 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:21:14 crc kubenswrapper[4949]: I0216 12:21:14.235899 4949 scope.go:117] "RemoveContainer" containerID="aface388741cbe2a3499be5b34861516246186ab0c0bbc522e7ee0c88862d5e3" Feb 16 12:21:14 crc kubenswrapper[4949]: E0216 12:21:14.237110 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:21:20 crc kubenswrapper[4949]: E0216 12:21:20.241246 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:21:25 crc kubenswrapper[4949]: E0216 12:21:25.241937 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:21:26 
crc kubenswrapper[4949]: I0216 12:21:26.235338 4949 scope.go:117] "RemoveContainer" containerID="aface388741cbe2a3499be5b34861516246186ab0c0bbc522e7ee0c88862d5e3" Feb 16 12:21:26 crc kubenswrapper[4949]: E0216 12:21:26.235979 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:21:35 crc kubenswrapper[4949]: E0216 12:21:35.239857 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:21:37 crc kubenswrapper[4949]: E0216 12:21:37.239050 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:21:41 crc kubenswrapper[4949]: I0216 12:21:41.257406 4949 scope.go:117] "RemoveContainer" containerID="aface388741cbe2a3499be5b34861516246186ab0c0bbc522e7ee0c88862d5e3" Feb 16 12:21:41 crc kubenswrapper[4949]: E0216 12:21:41.259133 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:21:49 crc kubenswrapper[4949]: E0216 12:21:49.240786 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:21:52 crc kubenswrapper[4949]: I0216 12:21:52.237435 4949 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 16 12:21:52 crc kubenswrapper[4949]: E0216 12:21:52.372220 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
Feb 16 12:21:52 crc kubenswrapper[4949]: E0216 12:21:52.372299 4949 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested"
Feb 16 12:21:52 crc kubenswrapper[4949]: E0216 12:21:52.372468 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n674h5dh7bh65bhcch65chc4h547h5d4h5c7h5dch5c8h74hb9h5f4hd8h79h7h59bh559h56bh9bhbch67bh68bh575h5cbh658h5bch7bhcch5d9q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8k7p7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(c69d7379-6f2b-45ae-8972-71e223a337a8): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError"
Feb 16 12:21:52 crc kubenswrapper[4949]: E0216 12:21:52.373711 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 12:21:54 crc kubenswrapper[4949]: I0216 12:21:54.236252 4949 scope.go:117] "RemoveContainer" containerID="aface388741cbe2a3499be5b34861516246186ab0c0bbc522e7ee0c88862d5e3"
Feb 16 12:21:54 crc kubenswrapper[4949]: E0216 12:21:54.237754 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b"
Feb 16 12:22:02 crc kubenswrapper[4949]: E0216 12:22:02.371133 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested"
Feb 16 12:22:02 crc kubenswrapper[4949]: E0216 12:22:02.371950 4949 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested"
Feb 16 12:22:02 crc kubenswrapper[4949]: E0216 12:22:02.372138 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ksbml,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-5lgds_openstack(a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError"
Feb 16 12:22:02 crc kubenswrapper[4949]: E0216 12:22:02.373469 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 12:22:04 crc kubenswrapper[4949]: E0216 12:22:04.237393 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 12:22:05 crc kubenswrapper[4949]: I0216 12:22:05.251416 4949 scope.go:117] "RemoveContainer" containerID="aface388741cbe2a3499be5b34861516246186ab0c0bbc522e7ee0c88862d5e3"
Feb 16 12:22:05 crc kubenswrapper[4949]: E0216 12:22:05.252062 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b"
Feb 16 12:22:13 crc kubenswrapper[4949]: E0216 12:22:13.238698 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 12:22:15 crc kubenswrapper[4949]: E0216 12:22:15.240391 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 12:22:18 crc kubenswrapper[4949]: I0216 12:22:18.236096 4949 scope.go:117] "RemoveContainer" containerID="aface388741cbe2a3499be5b34861516246186ab0c0bbc522e7ee0c88862d5e3"
Feb 16 12:22:18 crc kubenswrapper[4949]: E0216 12:22:18.236821 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b"
Feb 16 12:22:24 crc kubenswrapper[4949]: E0216 12:22:24.238531 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 12:22:27 crc kubenswrapper[4949]: E0216 12:22:27.239248 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 12:22:33 crc kubenswrapper[4949]: I0216 12:22:33.236135 4949 scope.go:117] "RemoveContainer" containerID="aface388741cbe2a3499be5b34861516246186ab0c0bbc522e7ee0c88862d5e3"
Feb 16 12:22:33 crc kubenswrapper[4949]: E0216 12:22:33.237113 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b"
Feb 16 12:22:37 crc kubenswrapper[4949]: E0216 12:22:37.241445 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 12:22:41 crc kubenswrapper[4949]: E0216 12:22:41.248665 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 12:22:46 crc kubenswrapper[4949]: I0216 12:22:46.235871 4949 scope.go:117] "RemoveContainer" containerID="aface388741cbe2a3499be5b34861516246186ab0c0bbc522e7ee0c88862d5e3"
Feb 16 12:22:46 crc kubenswrapper[4949]: E0216 12:22:46.236841 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b"
Feb 16 12:22:48 crc kubenswrapper[4949]: E0216 12:22:48.239547 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 12:22:50 crc kubenswrapper[4949]: I0216 12:22:50.254535 4949 generic.go:334] "Generic (PLEG): container finished" podID="b4b77ec2-a1cb-437b-86a9-a9554e316035" containerID="e6184ac4b74ecb21cb0207c86b63ba6c969df2451fb3aca355fd9c083236056b" exitCode=2
Feb 16 12:22:50 crc kubenswrapper[4949]: I0216 12:22:50.254970 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-qgrk6" event={"ID":"b4b77ec2-a1cb-437b-86a9-a9554e316035","Type":"ContainerDied","Data":"e6184ac4b74ecb21cb0207c86b63ba6c969df2451fb3aca355fd9c083236056b"}
Feb 16 12:22:51 crc kubenswrapper[4949]: I0216 12:22:51.795454 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-qgrk6"
Feb 16 12:22:51 crc kubenswrapper[4949]: I0216 12:22:51.898358 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b4b77ec2-a1cb-437b-86a9-a9554e316035-inventory\") pod \"b4b77ec2-a1cb-437b-86a9-a9554e316035\" (UID: \"b4b77ec2-a1cb-437b-86a9-a9554e316035\") "
Feb 16 12:22:51 crc kubenswrapper[4949]: I0216 12:22:51.898421 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9w9p5\" (UniqueName: \"kubernetes.io/projected/b4b77ec2-a1cb-437b-86a9-a9554e316035-kube-api-access-9w9p5\") pod \"b4b77ec2-a1cb-437b-86a9-a9554e316035\" (UID: \"b4b77ec2-a1cb-437b-86a9-a9554e316035\") "
Feb 16 12:22:51 crc kubenswrapper[4949]: I0216 12:22:51.898832 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b4b77ec2-a1cb-437b-86a9-a9554e316035-ssh-key-openstack-edpm-ipam\") pod \"b4b77ec2-a1cb-437b-86a9-a9554e316035\" (UID: \"b4b77ec2-a1cb-437b-86a9-a9554e316035\") "
Feb 16 12:22:51 crc kubenswrapper[4949]: I0216 12:22:51.903913 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4b77ec2-a1cb-437b-86a9-a9554e316035-kube-api-access-9w9p5" (OuterVolumeSpecName: "kube-api-access-9w9p5") pod "b4b77ec2-a1cb-437b-86a9-a9554e316035" (UID: "b4b77ec2-a1cb-437b-86a9-a9554e316035"). InnerVolumeSpecName "kube-api-access-9w9p5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Feb 16 12:22:51 crc kubenswrapper[4949]: I0216 12:22:51.932693 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4b77ec2-a1cb-437b-86a9-a9554e316035-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "b4b77ec2-a1cb-437b-86a9-a9554e316035" (UID: "b4b77ec2-a1cb-437b-86a9-a9554e316035"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue ""
Feb 16 12:22:51 crc kubenswrapper[4949]: I0216 12:22:51.935774 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4b77ec2-a1cb-437b-86a9-a9554e316035-inventory" (OuterVolumeSpecName: "inventory") pod "b4b77ec2-a1cb-437b-86a9-a9554e316035" (UID: "b4b77ec2-a1cb-437b-86a9-a9554e316035"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Feb 16 12:22:52 crc kubenswrapper[4949]: I0216 12:22:52.002286 4949 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b4b77ec2-a1cb-437b-86a9-a9554e316035-inventory\") on node \"crc\" DevicePath \"\""
Feb 16 12:22:52 crc kubenswrapper[4949]: I0216 12:22:52.002322 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9w9p5\" (UniqueName: \"kubernetes.io/projected/b4b77ec2-a1cb-437b-86a9-a9554e316035-kube-api-access-9w9p5\") on node \"crc\" DevicePath \"\""
Feb 16 12:22:52 crc kubenswrapper[4949]: I0216 12:22:52.002335 4949 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b4b77ec2-a1cb-437b-86a9-a9554e316035-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\""
Feb 16 12:22:52 crc kubenswrapper[4949]: I0216 12:22:52.287537 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-qgrk6" event={"ID":"b4b77ec2-a1cb-437b-86a9-a9554e316035","Type":"ContainerDied","Data":"3d524c6182a54ffb555c15bfaf6dcf283327ddf2dc933f5b6f7a6cffaad5fca7"}
Feb 16 12:22:52 crc kubenswrapper[4949]: I0216 12:22:52.287599 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3d524c6182a54ffb555c15bfaf6dcf283327ddf2dc933f5b6f7a6cffaad5fca7"
Feb 16 12:22:52 crc kubenswrapper[4949]: I0216 12:22:52.287641 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-qgrk6"
Feb 16 12:22:54 crc kubenswrapper[4949]: E0216 12:22:54.237618 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 12:22:58 crc kubenswrapper[4949]: I0216 12:22:58.235968 4949 scope.go:117] "RemoveContainer" containerID="aface388741cbe2a3499be5b34861516246186ab0c0bbc522e7ee0c88862d5e3"
Feb 16 12:22:58 crc kubenswrapper[4949]: E0216 12:22:58.236835 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b"
Feb 16 12:22:59 crc kubenswrapper[4949]: E0216 12:22:59.237368 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 12:22:59 crc kubenswrapper[4949]: I0216 12:22:59.687455 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-tjfp7"]
Feb 16 12:22:59 crc kubenswrapper[4949]: E0216 12:22:59.688387 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24d57688-6e83-41c1-b4c3-8295a3692a1a" containerName="extract-utilities"
Feb 16 12:22:59 crc kubenswrapper[4949]: I0216 12:22:59.688410 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="24d57688-6e83-41c1-b4c3-8295a3692a1a" containerName="extract-utilities"
Feb 16 12:22:59 crc kubenswrapper[4949]: E0216 12:22:59.688435 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24d57688-6e83-41c1-b4c3-8295a3692a1a" containerName="extract-content"
Feb 16 12:22:59 crc kubenswrapper[4949]: I0216 12:22:59.688444 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="24d57688-6e83-41c1-b4c3-8295a3692a1a" containerName="extract-content"
Feb 16 12:22:59 crc kubenswrapper[4949]: E0216 12:22:59.688485 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24d57688-6e83-41c1-b4c3-8295a3692a1a" containerName="registry-server"
Feb 16 12:22:59 crc kubenswrapper[4949]: I0216 12:22:59.688493 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="24d57688-6e83-41c1-b4c3-8295a3692a1a" containerName="registry-server"
Feb 16 12:22:59 crc kubenswrapper[4949]: E0216 12:22:59.688516 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4b77ec2-a1cb-437b-86a9-a9554e316035" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Feb 16 12:22:59 crc kubenswrapper[4949]: I0216 12:22:59.688526 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4b77ec2-a1cb-437b-86a9-a9554e316035" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Feb 16 12:22:59 crc kubenswrapper[4949]: I0216 12:22:59.688771 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4b77ec2-a1cb-437b-86a9-a9554e316035" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Feb 16 12:22:59 crc kubenswrapper[4949]: I0216 12:22:59.688819 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="24d57688-6e83-41c1-b4c3-8295a3692a1a" containerName="registry-server"
Feb 16 12:22:59 crc kubenswrapper[4949]: I0216 12:22:59.690833 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tjfp7"
Feb 16 12:22:59 crc kubenswrapper[4949]: I0216 12:22:59.713343 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8dae2b4d-2874-41be-b02a-161eed0e8192-catalog-content\") pod \"redhat-operators-tjfp7\" (UID: \"8dae2b4d-2874-41be-b02a-161eed0e8192\") " pod="openshift-marketplace/redhat-operators-tjfp7"
Feb 16 12:22:59 crc kubenswrapper[4949]: I0216 12:22:59.713533 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bltzh\" (UniqueName: \"kubernetes.io/projected/8dae2b4d-2874-41be-b02a-161eed0e8192-kube-api-access-bltzh\") pod \"redhat-operators-tjfp7\" (UID: \"8dae2b4d-2874-41be-b02a-161eed0e8192\") " pod="openshift-marketplace/redhat-operators-tjfp7"
Feb 16 12:22:59 crc kubenswrapper[4949]: I0216 12:22:59.713602 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8dae2b4d-2874-41be-b02a-161eed0e8192-utilities\") pod \"redhat-operators-tjfp7\" (UID: \"8dae2b4d-2874-41be-b02a-161eed0e8192\") " pod="openshift-marketplace/redhat-operators-tjfp7"
Feb 16 12:22:59 crc kubenswrapper[4949]: I0216 12:22:59.727738 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tjfp7"]
Feb 16 12:22:59 crc kubenswrapper[4949]: I0216 12:22:59.815675 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8dae2b4d-2874-41be-b02a-161eed0e8192-catalog-content\") pod \"redhat-operators-tjfp7\" (UID: \"8dae2b4d-2874-41be-b02a-161eed0e8192\") " pod="openshift-marketplace/redhat-operators-tjfp7"
Feb 16 12:22:59 crc kubenswrapper[4949]: I0216 12:22:59.815763 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bltzh\" (UniqueName: \"kubernetes.io/projected/8dae2b4d-2874-41be-b02a-161eed0e8192-kube-api-access-bltzh\") pod \"redhat-operators-tjfp7\" (UID: \"8dae2b4d-2874-41be-b02a-161eed0e8192\") " pod="openshift-marketplace/redhat-operators-tjfp7"
Feb 16 12:22:59 crc kubenswrapper[4949]: I0216 12:22:59.815795 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8dae2b4d-2874-41be-b02a-161eed0e8192-utilities\") pod \"redhat-operators-tjfp7\" (UID: \"8dae2b4d-2874-41be-b02a-161eed0e8192\") " pod="openshift-marketplace/redhat-operators-tjfp7"
Feb 16 12:22:59 crc kubenswrapper[4949]: I0216 12:22:59.816437 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8dae2b4d-2874-41be-b02a-161eed0e8192-utilities\") pod \"redhat-operators-tjfp7\" (UID: \"8dae2b4d-2874-41be-b02a-161eed0e8192\") " pod="openshift-marketplace/redhat-operators-tjfp7"
Feb 16 12:22:59 crc kubenswrapper[4949]: I0216 12:22:59.816565 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8dae2b4d-2874-41be-b02a-161eed0e8192-catalog-content\") pod \"redhat-operators-tjfp7\" (UID: \"8dae2b4d-2874-41be-b02a-161eed0e8192\") " pod="openshift-marketplace/redhat-operators-tjfp7"
Feb 16 12:22:59 crc kubenswrapper[4949]: I0216 12:22:59.846124 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bltzh\" (UniqueName: \"kubernetes.io/projected/8dae2b4d-2874-41be-b02a-161eed0e8192-kube-api-access-bltzh\") pod \"redhat-operators-tjfp7\" (UID: \"8dae2b4d-2874-41be-b02a-161eed0e8192\") " pod="openshift-marketplace/redhat-operators-tjfp7"
\"kube-api-access-bltzh\" (UniqueName: \"kubernetes.io/projected/8dae2b4d-2874-41be-b02a-161eed0e8192-kube-api-access-bltzh\") pod \"redhat-operators-tjfp7\" (UID: \"8dae2b4d-2874-41be-b02a-161eed0e8192\") " pod="openshift-marketplace/redhat-operators-tjfp7" Feb 16 12:23:00 crc kubenswrapper[4949]: I0216 12:23:00.026474 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tjfp7" Feb 16 12:23:00 crc kubenswrapper[4949]: I0216 12:23:00.539448 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tjfp7"] Feb 16 12:23:01 crc kubenswrapper[4949]: I0216 12:23:01.388987 4949 generic.go:334] "Generic (PLEG): container finished" podID="8dae2b4d-2874-41be-b02a-161eed0e8192" containerID="3c932627c42043de356c37de9d36a9714246e35f657ca2c3d831f22c03fcb28e" exitCode=0 Feb 16 12:23:01 crc kubenswrapper[4949]: I0216 12:23:01.389209 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tjfp7" event={"ID":"8dae2b4d-2874-41be-b02a-161eed0e8192","Type":"ContainerDied","Data":"3c932627c42043de356c37de9d36a9714246e35f657ca2c3d831f22c03fcb28e"} Feb 16 12:23:01 crc kubenswrapper[4949]: I0216 12:23:01.389319 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tjfp7" event={"ID":"8dae2b4d-2874-41be-b02a-161eed0e8192","Type":"ContainerStarted","Data":"a7849be9a3c863d89da5ea18a953d19169b3320e6b54304854041467a8d96194"} Feb 16 12:23:03 crc kubenswrapper[4949]: I0216 12:23:03.413823 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tjfp7" event={"ID":"8dae2b4d-2874-41be-b02a-161eed0e8192","Type":"ContainerStarted","Data":"7d343ee577e47aa4cde0f688e1165b25cb271446bc0a60848b80abea42e3ebbb"} Feb 16 12:23:06 crc kubenswrapper[4949]: E0216 12:23:06.240693 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:23:07 crc kubenswrapper[4949]: I0216 12:23:07.475414 4949 generic.go:334] "Generic (PLEG): container finished" podID="8dae2b4d-2874-41be-b02a-161eed0e8192" containerID="7d343ee577e47aa4cde0f688e1165b25cb271446bc0a60848b80abea42e3ebbb" exitCode=0 Feb 16 12:23:07 crc kubenswrapper[4949]: I0216 12:23:07.475778 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tjfp7" event={"ID":"8dae2b4d-2874-41be-b02a-161eed0e8192","Type":"ContainerDied","Data":"7d343ee577e47aa4cde0f688e1165b25cb271446bc0a60848b80abea42e3ebbb"} Feb 16 12:23:08 crc kubenswrapper[4949]: I0216 12:23:08.497551 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tjfp7" event={"ID":"8dae2b4d-2874-41be-b02a-161eed0e8192","Type":"ContainerStarted","Data":"7c29682b492dff73009d739771ab6015dede1f302a3d311999f35ba1d59fd294"} Feb 16 12:23:08 crc kubenswrapper[4949]: I0216 12:23:08.527060 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-tjfp7" podStartSLOduration=3.006424602 podStartE2EDuration="9.5270195s" podCreationTimestamp="2026-02-16 12:22:59 +0000 UTC" firstStartedPulling="2026-02-16 12:23:01.392318759 +0000 UTC m=+4571.021652924" 
lastFinishedPulling="2026-02-16 12:23:07.912913617 +0000 UTC m=+4577.542247822" observedRunningTime="2026-02-16 12:23:08.516906532 +0000 UTC m=+4578.146240777" watchObservedRunningTime="2026-02-16 12:23:08.5270195 +0000 UTC m=+4578.156353665" Feb 16 12:23:10 crc kubenswrapper[4949]: I0216 12:23:10.027119 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-tjfp7" Feb 16 12:23:10 crc kubenswrapper[4949]: I0216 12:23:10.027246 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-tjfp7" Feb 16 12:23:11 crc kubenswrapper[4949]: I0216 12:23:11.103998 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tjfp7" podUID="8dae2b4d-2874-41be-b02a-161eed0e8192" containerName="registry-server" probeResult="failure" output=< Feb 16 12:23:11 crc kubenswrapper[4949]: timeout: failed to connect service ":50051" within 1s Feb 16 12:23:11 crc kubenswrapper[4949]: > Feb 16 12:23:13 crc kubenswrapper[4949]: I0216 12:23:13.237543 4949 scope.go:117] "RemoveContainer" containerID="aface388741cbe2a3499be5b34861516246186ab0c0bbc522e7ee0c88862d5e3" Feb 16 12:23:13 crc kubenswrapper[4949]: E0216 12:23:13.238229 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:23:14 crc kubenswrapper[4949]: E0216 12:23:14.240037 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:23:17 crc kubenswrapper[4949]: E0216 12:23:17.238155 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:23:20 crc kubenswrapper[4949]: I0216 12:23:20.607829 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-tjfp7" Feb 16 12:23:20 crc kubenswrapper[4949]: I0216 12:23:20.666702 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-tjfp7" Feb 16 12:23:20 crc kubenswrapper[4949]: I0216 12:23:20.861501 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-tjfp7"] Feb 16 12:23:21 crc kubenswrapper[4949]: I0216 12:23:21.681926 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-tjfp7" podUID="8dae2b4d-2874-41be-b02a-161eed0e8192" containerName="registry-server" containerID="cri-o://7c29682b492dff73009d739771ab6015dede1f302a3d311999f35ba1d59fd294" gracePeriod=2 Feb 16 12:23:22 crc kubenswrapper[4949]: I0216 12:23:22.287073 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-tjfp7" Feb 16 12:23:22 crc kubenswrapper[4949]: I0216 12:23:22.332531 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bltzh\" (UniqueName: \"kubernetes.io/projected/8dae2b4d-2874-41be-b02a-161eed0e8192-kube-api-access-bltzh\") pod \"8dae2b4d-2874-41be-b02a-161eed0e8192\" (UID: \"8dae2b4d-2874-41be-b02a-161eed0e8192\") " Feb 16 12:23:22 crc kubenswrapper[4949]: I0216 12:23:22.332836 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8dae2b4d-2874-41be-b02a-161eed0e8192-catalog-content\") pod \"8dae2b4d-2874-41be-b02a-161eed0e8192\" (UID: \"8dae2b4d-2874-41be-b02a-161eed0e8192\") " Feb 16 12:23:22 crc kubenswrapper[4949]: I0216 12:23:22.332927 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8dae2b4d-2874-41be-b02a-161eed0e8192-utilities\") pod \"8dae2b4d-2874-41be-b02a-161eed0e8192\" (UID: \"8dae2b4d-2874-41be-b02a-161eed0e8192\") " Feb 16 12:23:22 crc kubenswrapper[4949]: I0216 12:23:22.335029 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8dae2b4d-2874-41be-b02a-161eed0e8192-utilities" (OuterVolumeSpecName: "utilities") pod "8dae2b4d-2874-41be-b02a-161eed0e8192" (UID: "8dae2b4d-2874-41be-b02a-161eed0e8192"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 12:23:22 crc kubenswrapper[4949]: I0216 12:23:22.341894 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8dae2b4d-2874-41be-b02a-161eed0e8192-kube-api-access-bltzh" (OuterVolumeSpecName: "kube-api-access-bltzh") pod "8dae2b4d-2874-41be-b02a-161eed0e8192" (UID: "8dae2b4d-2874-41be-b02a-161eed0e8192"). InnerVolumeSpecName "kube-api-access-bltzh". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 12:23:22 crc kubenswrapper[4949]: I0216 12:23:22.436815 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bltzh\" (UniqueName: \"kubernetes.io/projected/8dae2b4d-2874-41be-b02a-161eed0e8192-kube-api-access-bltzh\") on node \"crc\" DevicePath \"\"" Feb 16 12:23:22 crc kubenswrapper[4949]: I0216 12:23:22.436842 4949 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8dae2b4d-2874-41be-b02a-161eed0e8192-utilities\") on node \"crc\" DevicePath \"\"" Feb 16 12:23:22 crc kubenswrapper[4949]: I0216 12:23:22.486855 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8dae2b4d-2874-41be-b02a-161eed0e8192-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8dae2b4d-2874-41be-b02a-161eed0e8192" (UID: "8dae2b4d-2874-41be-b02a-161eed0e8192"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 12:23:22 crc kubenswrapper[4949]: I0216 12:23:22.541948 4949 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8dae2b4d-2874-41be-b02a-161eed0e8192-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 16 12:23:22 crc kubenswrapper[4949]: I0216 12:23:22.696367 4949 generic.go:334] "Generic (PLEG): container finished" podID="8dae2b4d-2874-41be-b02a-161eed0e8192" containerID="7c29682b492dff73009d739771ab6015dede1f302a3d311999f35ba1d59fd294" exitCode=0 Feb 16 12:23:22 crc kubenswrapper[4949]: I0216 12:23:22.696474 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tjfp7" Feb 16 12:23:22 crc kubenswrapper[4949]: I0216 12:23:22.696502 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tjfp7" event={"ID":"8dae2b4d-2874-41be-b02a-161eed0e8192","Type":"ContainerDied","Data":"7c29682b492dff73009d739771ab6015dede1f302a3d311999f35ba1d59fd294"} Feb 16 12:23:22 crc kubenswrapper[4949]: I0216 12:23:22.697419 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tjfp7" event={"ID":"8dae2b4d-2874-41be-b02a-161eed0e8192","Type":"ContainerDied","Data":"a7849be9a3c863d89da5ea18a953d19169b3320e6b54304854041467a8d96194"} Feb 16 12:23:22 crc kubenswrapper[4949]: I0216 12:23:22.697456 4949 scope.go:117] "RemoveContainer" containerID="7c29682b492dff73009d739771ab6015dede1f302a3d311999f35ba1d59fd294" Feb 16 12:23:22 crc kubenswrapper[4949]: I0216 12:23:22.734789 4949 scope.go:117] "RemoveContainer" containerID="7d343ee577e47aa4cde0f688e1165b25cb271446bc0a60848b80abea42e3ebbb" Feb 16 12:23:22 crc kubenswrapper[4949]: I0216 12:23:22.745589 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-tjfp7"] Feb 16 12:23:22 crc kubenswrapper[4949]: I0216 12:23:22.760436 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-tjfp7"] Feb 16 12:23:22 crc kubenswrapper[4949]: I0216 12:23:22.784578 4949 scope.go:117] "RemoveContainer" containerID="3c932627c42043de356c37de9d36a9714246e35f657ca2c3d831f22c03fcb28e" Feb 16 12:23:22 crc kubenswrapper[4949]: I0216 12:23:22.820657 4949 scope.go:117] "RemoveContainer" containerID="7c29682b492dff73009d739771ab6015dede1f302a3d311999f35ba1d59fd294" Feb 16 12:23:22 crc kubenswrapper[4949]: E0216 12:23:22.821357 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7c29682b492dff73009d739771ab6015dede1f302a3d311999f35ba1d59fd294\": container with ID starting with 7c29682b492dff73009d739771ab6015dede1f302a3d311999f35ba1d59fd294 not found: ID does not exist" containerID="7c29682b492dff73009d739771ab6015dede1f302a3d311999f35ba1d59fd294" Feb 16 12:23:22 crc kubenswrapper[4949]: I0216 12:23:22.821400 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7c29682b492dff73009d739771ab6015dede1f302a3d311999f35ba1d59fd294"} err="failed to get container status \"7c29682b492dff73009d739771ab6015dede1f302a3d311999f35ba1d59fd294\": rpc error: code = NotFound desc = could not find container \"7c29682b492dff73009d739771ab6015dede1f302a3d311999f35ba1d59fd294\": container with ID starting with 7c29682b492dff73009d739771ab6015dede1f302a3d311999f35ba1d59fd294 not found: ID does not exist" Feb 16 12:23:22 crc 
kubenswrapper[4949]: I0216 12:23:22.821430 4949 scope.go:117] "RemoveContainer" containerID="7d343ee577e47aa4cde0f688e1165b25cb271446bc0a60848b80abea42e3ebbb" Feb 16 12:23:22 crc kubenswrapper[4949]: E0216 12:23:22.821850 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7d343ee577e47aa4cde0f688e1165b25cb271446bc0a60848b80abea42e3ebbb\": container with ID starting with 7d343ee577e47aa4cde0f688e1165b25cb271446bc0a60848b80abea42e3ebbb not found: ID does not exist" containerID="7d343ee577e47aa4cde0f688e1165b25cb271446bc0a60848b80abea42e3ebbb" Feb 16 12:23:22 crc kubenswrapper[4949]: I0216 12:23:22.821888 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d343ee577e47aa4cde0f688e1165b25cb271446bc0a60848b80abea42e3ebbb"} err="failed to get container status \"7d343ee577e47aa4cde0f688e1165b25cb271446bc0a60848b80abea42e3ebbb\": rpc error: code = NotFound desc = could not find container \"7d343ee577e47aa4cde0f688e1165b25cb271446bc0a60848b80abea42e3ebbb\": container with ID starting with 7d343ee577e47aa4cde0f688e1165b25cb271446bc0a60848b80abea42e3ebbb not found: ID does not exist" Feb 16 12:23:22 crc kubenswrapper[4949]: I0216 12:23:22.821916 4949 scope.go:117] "RemoveContainer" containerID="3c932627c42043de356c37de9d36a9714246e35f657ca2c3d831f22c03fcb28e" Feb 16 12:23:22 crc kubenswrapper[4949]: E0216 12:23:22.822352 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3c932627c42043de356c37de9d36a9714246e35f657ca2c3d831f22c03fcb28e\": container with ID starting with 3c932627c42043de356c37de9d36a9714246e35f657ca2c3d831f22c03fcb28e not found: ID does not exist" containerID="3c932627c42043de356c37de9d36a9714246e35f657ca2c3d831f22c03fcb28e" Feb 16 12:23:22 crc kubenswrapper[4949]: I0216 12:23:22.822560 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c932627c42043de356c37de9d36a9714246e35f657ca2c3d831f22c03fcb28e"} err="failed to get container status \"3c932627c42043de356c37de9d36a9714246e35f657ca2c3d831f22c03fcb28e\": rpc error: code = NotFound desc = could not find container \"3c932627c42043de356c37de9d36a9714246e35f657ca2c3d831f22c03fcb28e\": container with ID starting with 3c932627c42043de356c37de9d36a9714246e35f657ca2c3d831f22c03fcb28e not found: ID does not exist" Feb 16 12:23:23 crc kubenswrapper[4949]: I0216 12:23:23.256765 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8dae2b4d-2874-41be-b02a-161eed0e8192" path="/var/lib/kubelet/pods/8dae2b4d-2874-41be-b02a-161eed0e8192/volumes" Feb 16 12:23:28 crc kubenswrapper[4949]: I0216 12:23:28.235820 4949 scope.go:117] "RemoveContainer" containerID="aface388741cbe2a3499be5b34861516246186ab0c0bbc522e7ee0c88862d5e3" Feb 16 12:23:28 crc kubenswrapper[4949]: E0216 12:23:28.236504 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:23:29 crc kubenswrapper[4949]: E0216 12:23:29.239186 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:23:29 crc kubenswrapper[4949]: E0216 12:23:29.239612 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:23:39 crc kubenswrapper[4949]: I0216 12:23:39.235632 4949 scope.go:117] "RemoveContainer" containerID="aface388741cbe2a3499be5b34861516246186ab0c0bbc522e7ee0c88862d5e3" Feb 16 12:23:39 crc kubenswrapper[4949]: E0216 12:23:39.236823 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:23:42 crc kubenswrapper[4949]: E0216 12:23:42.238029 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:23:42 crc kubenswrapper[4949]: E0216 12:23:42.238471 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:23:51 crc kubenswrapper[4949]: I0216 12:23:51.276894 4949 scope.go:117] "RemoveContainer" containerID="aface388741cbe2a3499be5b34861516246186ab0c0bbc522e7ee0c88862d5e3" Feb 16 12:23:51 crc kubenswrapper[4949]: E0216 12:23:51.278398 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:23:55 crc kubenswrapper[4949]: E0216 12:23:55.239369 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:23:57 crc kubenswrapper[4949]: E0216 12:23:57.239500 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" 
pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:24:06 crc kubenswrapper[4949]: I0216 12:24:06.235898 4949 scope.go:117] "RemoveContainer" containerID="aface388741cbe2a3499be5b34861516246186ab0c0bbc522e7ee0c88862d5e3" Feb 16 12:24:06 crc kubenswrapper[4949]: E0216 12:24:06.236816 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:24:08 crc kubenswrapper[4949]: E0216 12:24:08.239292 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:24:09 crc kubenswrapper[4949]: E0216 12:24:09.264829 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:24:18 crc kubenswrapper[4949]: I0216 12:24:18.235932 4949 scope.go:117] "RemoveContainer" containerID="aface388741cbe2a3499be5b34861516246186ab0c0bbc522e7ee0c88862d5e3" Feb 16 12:24:18 crc kubenswrapper[4949]: E0216 12:24:18.237319 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:24:20 crc kubenswrapper[4949]: E0216 12:24:20.238468 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:24:23 crc kubenswrapper[4949]: E0216 12:24:23.237989 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:24:23 crc kubenswrapper[4949]: I0216 12:24:23.961032 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-pc7bx"] Feb 16 12:24:23 crc kubenswrapper[4949]: E0216 12:24:23.961704 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8dae2b4d-2874-41be-b02a-161eed0e8192" containerName="registry-server" Feb 16 12:24:23 crc kubenswrapper[4949]: I0216 12:24:23.961746 4949 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="8dae2b4d-2874-41be-b02a-161eed0e8192" containerName="registry-server" Feb 16 12:24:23 crc kubenswrapper[4949]: E0216 12:24:23.961771 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8dae2b4d-2874-41be-b02a-161eed0e8192" containerName="extract-content" Feb 16 12:24:23 crc kubenswrapper[4949]: I0216 12:24:23.961779 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="8dae2b4d-2874-41be-b02a-161eed0e8192" containerName="extract-content" Feb 16 12:24:23 crc kubenswrapper[4949]: E0216 12:24:23.961791 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8dae2b4d-2874-41be-b02a-161eed0e8192" containerName="extract-utilities" Feb 16 12:24:23 crc kubenswrapper[4949]: I0216 12:24:23.961799 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="8dae2b4d-2874-41be-b02a-161eed0e8192" containerName="extract-utilities" Feb 16 12:24:23 crc kubenswrapper[4949]: I0216 12:24:23.962131 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="8dae2b4d-2874-41be-b02a-161eed0e8192" containerName="registry-server" Feb 16 12:24:23 crc kubenswrapper[4949]: I0216 12:24:23.964239 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pc7bx" Feb 16 12:24:23 crc kubenswrapper[4949]: I0216 12:24:23.981285 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pc7bx"] Feb 16 12:24:24 crc kubenswrapper[4949]: I0216 12:24:24.054255 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/25f25b13-00e3-44f4-963d-cd39478a74f7-catalog-content\") pod \"community-operators-pc7bx\" (UID: \"25f25b13-00e3-44f4-963d-cd39478a74f7\") " pod="openshift-marketplace/community-operators-pc7bx" Feb 16 12:24:24 crc kubenswrapper[4949]: I0216 12:24:24.054370 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gs4nr\" (UniqueName: \"kubernetes.io/projected/25f25b13-00e3-44f4-963d-cd39478a74f7-kube-api-access-gs4nr\") pod \"community-operators-pc7bx\" (UID: \"25f25b13-00e3-44f4-963d-cd39478a74f7\") " pod="openshift-marketplace/community-operators-pc7bx" Feb 16 12:24:24 crc kubenswrapper[4949]: I0216 12:24:24.054574 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/25f25b13-00e3-44f4-963d-cd39478a74f7-utilities\") pod \"community-operators-pc7bx\" (UID: \"25f25b13-00e3-44f4-963d-cd39478a74f7\") " pod="openshift-marketplace/community-operators-pc7bx" Feb 16 12:24:24 crc kubenswrapper[4949]: I0216 12:24:24.156655 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gs4nr\" (UniqueName: \"kubernetes.io/projected/25f25b13-00e3-44f4-963d-cd39478a74f7-kube-api-access-gs4nr\") pod \"community-operators-pc7bx\" (UID: \"25f25b13-00e3-44f4-963d-cd39478a74f7\") " pod="openshift-marketplace/community-operators-pc7bx" Feb 16 12:24:24 crc kubenswrapper[4949]: I0216 12:24:24.156896 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/25f25b13-00e3-44f4-963d-cd39478a74f7-utilities\") pod \"community-operators-pc7bx\" (UID: \"25f25b13-00e3-44f4-963d-cd39478a74f7\") " pod="openshift-marketplace/community-operators-pc7bx" Feb 16 12:24:24 crc kubenswrapper[4949]: I0216 12:24:24.157016 4949 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/25f25b13-00e3-44f4-963d-cd39478a74f7-catalog-content\") pod \"community-operators-pc7bx\" (UID: \"25f25b13-00e3-44f4-963d-cd39478a74f7\") " pod="openshift-marketplace/community-operators-pc7bx" Feb 16 12:24:24 crc kubenswrapper[4949]: I0216 12:24:24.157850 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/25f25b13-00e3-44f4-963d-cd39478a74f7-utilities\") pod \"community-operators-pc7bx\" (UID: \"25f25b13-00e3-44f4-963d-cd39478a74f7\") " pod="openshift-marketplace/community-operators-pc7bx" Feb 16 12:24:24 crc kubenswrapper[4949]: I0216 12:24:24.158130 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/25f25b13-00e3-44f4-963d-cd39478a74f7-catalog-content\") pod \"community-operators-pc7bx\" (UID: \"25f25b13-00e3-44f4-963d-cd39478a74f7\") " pod="openshift-marketplace/community-operators-pc7bx" Feb 16 12:24:24 crc kubenswrapper[4949]: I0216 12:24:24.178006 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gs4nr\" (UniqueName: \"kubernetes.io/projected/25f25b13-00e3-44f4-963d-cd39478a74f7-kube-api-access-gs4nr\") pod \"community-operators-pc7bx\" (UID: \"25f25b13-00e3-44f4-963d-cd39478a74f7\") " pod="openshift-marketplace/community-operators-pc7bx" Feb 16 12:24:24 crc kubenswrapper[4949]: I0216 12:24:24.300090 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pc7bx" Feb 16 12:24:24 crc kubenswrapper[4949]: I0216 12:24:24.889243 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pc7bx"] Feb 16 12:24:25 crc kubenswrapper[4949]: I0216 12:24:25.518668 4949 generic.go:334] "Generic (PLEG): container finished" podID="25f25b13-00e3-44f4-963d-cd39478a74f7" containerID="d9bdc2f451760f2eab0b5f6b326681adb4c4251b0d08e765ef4a51cde16f1a7c" exitCode=0 Feb 16 12:24:25 crc kubenswrapper[4949]: I0216 12:24:25.518830 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pc7bx" event={"ID":"25f25b13-00e3-44f4-963d-cd39478a74f7","Type":"ContainerDied","Data":"d9bdc2f451760f2eab0b5f6b326681adb4c4251b0d08e765ef4a51cde16f1a7c"} Feb 16 12:24:25 crc kubenswrapper[4949]: I0216 12:24:25.519121 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pc7bx" event={"ID":"25f25b13-00e3-44f4-963d-cd39478a74f7","Type":"ContainerStarted","Data":"e9a69ed8138e97bf5fcc454b1ce20b13a586be445f80a893d6d3e197bb7543eb"} Feb 16 12:24:27 crc kubenswrapper[4949]: I0216 12:24:27.545759 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pc7bx" event={"ID":"25f25b13-00e3-44f4-963d-cd39478a74f7","Type":"ContainerStarted","Data":"0928f02d1e53feec9cdecb78cc9626de8ee5ec7b4c5edd68c56a0c7e4b942448"} Feb 16 12:24:28 crc kubenswrapper[4949]: I0216 12:24:28.560298 4949 generic.go:334] "Generic (PLEG): container finished" podID="25f25b13-00e3-44f4-963d-cd39478a74f7" containerID="0928f02d1e53feec9cdecb78cc9626de8ee5ec7b4c5edd68c56a0c7e4b942448" exitCode=0 Feb 16 12:24:28 crc kubenswrapper[4949]: I0216 12:24:28.560391 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pc7bx" 
event={"ID":"25f25b13-00e3-44f4-963d-cd39478a74f7","Type":"ContainerDied","Data":"0928f02d1e53feec9cdecb78cc9626de8ee5ec7b4c5edd68c56a0c7e4b942448"} Feb 16 12:24:30 crc kubenswrapper[4949]: I0216 12:24:30.191768 4949 trace.go:236] Trace[1320463745]: "Calculate volume metrics of persistence for pod openstack/rabbitmq-cell1-server-0" (16-Feb-2026 12:24:28.465) (total time: 1721ms): Feb 16 12:24:30 crc kubenswrapper[4949]: Trace[1320463745]: [1.72170151s] [1.72170151s] END Feb 16 12:24:30 crc kubenswrapper[4949]: I0216 12:24:30.222774 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-mwpxx"] Feb 16 12:24:30 crc kubenswrapper[4949]: I0216 12:24:30.240235 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-mwpxx" Feb 16 12:24:30 crc kubenswrapper[4949]: I0216 12:24:30.257649 4949 scope.go:117] "RemoveContainer" containerID="aface388741cbe2a3499be5b34861516246186ab0c0bbc522e7ee0c88862d5e3" Feb 16 12:24:30 crc kubenswrapper[4949]: E0216 12:24:30.274688 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:24:30 crc kubenswrapper[4949]: I0216 12:24:30.275865 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-mwpxx"] Feb 16 12:24:30 crc kubenswrapper[4949]: I0216 12:24:30.276428 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tpcs9\" (UniqueName: \"kubernetes.io/projected/c67adadf-9ea0-420b-805e-e15186b4c7b7-kube-api-access-tpcs9\") pod \"certified-operators-mwpxx\" (UID: \"c67adadf-9ea0-420b-805e-e15186b4c7b7\") " pod="openshift-marketplace/certified-operators-mwpxx" Feb 16 12:24:30 crc kubenswrapper[4949]: I0216 12:24:30.276883 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c67adadf-9ea0-420b-805e-e15186b4c7b7-utilities\") pod \"certified-operators-mwpxx\" (UID: \"c67adadf-9ea0-420b-805e-e15186b4c7b7\") " pod="openshift-marketplace/certified-operators-mwpxx" Feb 16 12:24:30 crc kubenswrapper[4949]: I0216 12:24:30.276920 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c67adadf-9ea0-420b-805e-e15186b4c7b7-catalog-content\") pod \"certified-operators-mwpxx\" (UID: \"c67adadf-9ea0-420b-805e-e15186b4c7b7\") " pod="openshift-marketplace/certified-operators-mwpxx" Feb 16 12:24:30 crc kubenswrapper[4949]: I0216 12:24:30.380189 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c67adadf-9ea0-420b-805e-e15186b4c7b7-utilities\") pod \"certified-operators-mwpxx\" (UID: \"c67adadf-9ea0-420b-805e-e15186b4c7b7\") " pod="openshift-marketplace/certified-operators-mwpxx" Feb 16 12:24:30 crc kubenswrapper[4949]: I0216 12:24:30.380241 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/c67adadf-9ea0-420b-805e-e15186b4c7b7-catalog-content\") pod \"certified-operators-mwpxx\" (UID: \"c67adadf-9ea0-420b-805e-e15186b4c7b7\") " pod="openshift-marketplace/certified-operators-mwpxx" Feb 16 12:24:30 crc kubenswrapper[4949]: I0216 12:24:30.380339 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tpcs9\" (UniqueName: \"kubernetes.io/projected/c67adadf-9ea0-420b-805e-e15186b4c7b7-kube-api-access-tpcs9\") pod \"certified-operators-mwpxx\" (UID: \"c67adadf-9ea0-420b-805e-e15186b4c7b7\") " pod="openshift-marketplace/certified-operators-mwpxx" Feb 16 12:24:30 crc kubenswrapper[4949]: I0216 12:24:30.380601 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c67adadf-9ea0-420b-805e-e15186b4c7b7-utilities\") pod \"certified-operators-mwpxx\" (UID: \"c67adadf-9ea0-420b-805e-e15186b4c7b7\") " pod="openshift-marketplace/certified-operators-mwpxx" Feb 16 12:24:30 crc kubenswrapper[4949]: I0216 12:24:30.380734 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c67adadf-9ea0-420b-805e-e15186b4c7b7-catalog-content\") pod \"certified-operators-mwpxx\" (UID: \"c67adadf-9ea0-420b-805e-e15186b4c7b7\") " pod="openshift-marketplace/certified-operators-mwpxx" Feb 16 12:24:30 crc kubenswrapper[4949]: I0216 12:24:30.406394 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tpcs9\" (UniqueName: \"kubernetes.io/projected/c67adadf-9ea0-420b-805e-e15186b4c7b7-kube-api-access-tpcs9\") pod \"certified-operators-mwpxx\" (UID: \"c67adadf-9ea0-420b-805e-e15186b4c7b7\") " pod="openshift-marketplace/certified-operators-mwpxx" Feb 16 12:24:30 crc kubenswrapper[4949]: I0216 12:24:30.606624 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-mwpxx" Feb 16 12:24:31 crc kubenswrapper[4949]: W0216 12:24:31.250110 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc67adadf_9ea0_420b_805e_e15186b4c7b7.slice/crio-fb10492951bb1f86a45c0c708e61fdee354c0d539e4d581946aaa9ed0a7f8381 WatchSource:0}: Error finding container fb10492951bb1f86a45c0c708e61fdee354c0d539e4d581946aaa9ed0a7f8381: Status 404 returned error can't find the container with id fb10492951bb1f86a45c0c708e61fdee354c0d539e4d581946aaa9ed0a7f8381 Feb 16 12:24:31 crc kubenswrapper[4949]: I0216 12:24:31.257885 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-mwpxx"] Feb 16 12:24:31 crc kubenswrapper[4949]: I0216 12:24:31.345979 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pc7bx" event={"ID":"25f25b13-00e3-44f4-963d-cd39478a74f7","Type":"ContainerStarted","Data":"04888b79ba59c15f603a369e59baa15add45c26b7dde5d49632803d87517a4bf"} Feb 16 12:24:31 crc kubenswrapper[4949]: I0216 12:24:31.351751 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mwpxx" event={"ID":"c67adadf-9ea0-420b-805e-e15186b4c7b7","Type":"ContainerStarted","Data":"fb10492951bb1f86a45c0c708e61fdee354c0d539e4d581946aaa9ed0a7f8381"} Feb 16 12:24:31 crc kubenswrapper[4949]: I0216 12:24:31.385908 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-pc7bx" podStartSLOduration=4.870970807 podStartE2EDuration="8.385876707s" podCreationTimestamp="2026-02-16 12:24:23 +0000 UTC" firstStartedPulling="2026-02-16 12:24:25.521957267 +0000 UTC m=+4655.151291442" lastFinishedPulling="2026-02-16 12:24:29.036863147 +0000 UTC m=+4658.666197342" observedRunningTime="2026-02-16 12:24:31.367350899 +0000 UTC m=+4660.996685134" watchObservedRunningTime="2026-02-16 12:24:31.385876707 +0000 UTC m=+4661.015210912" Feb 16 12:24:32 crc kubenswrapper[4949]: I0216 12:24:32.365634 4949 generic.go:334] "Generic (PLEG): container finished" podID="c67adadf-9ea0-420b-805e-e15186b4c7b7" containerID="4a14edc6c341abdaefc6fe12109a048103afea6ef0d05877ada3d210f8a59bd5" exitCode=0 Feb 16 12:24:32 crc kubenswrapper[4949]: I0216 12:24:32.365945 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mwpxx" event={"ID":"c67adadf-9ea0-420b-805e-e15186b4c7b7","Type":"ContainerDied","Data":"4a14edc6c341abdaefc6fe12109a048103afea6ef0d05877ada3d210f8a59bd5"} Feb 16 12:24:33 crc kubenswrapper[4949]: E0216 12:24:33.236290 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:24:33 crc kubenswrapper[4949]: I0216 12:24:33.378984 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mwpxx" event={"ID":"c67adadf-9ea0-420b-805e-e15186b4c7b7","Type":"ContainerStarted","Data":"3dc23bcf6da069007b4516c44f55edcc5617ea96c0f1fecc06058e6c361dbd58"} Feb 16 12:24:34 crc kubenswrapper[4949]: I0216 12:24:34.300609 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-marketplace/community-operators-pc7bx" Feb 16 12:24:34 crc kubenswrapper[4949]: I0216 12:24:34.300664 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-pc7bx" Feb 16 12:24:34 crc kubenswrapper[4949]: I0216 12:24:34.380935 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-pc7bx" Feb 16 12:24:35 crc kubenswrapper[4949]: I0216 12:24:35.419823 4949 generic.go:334] "Generic (PLEG): container finished" podID="c67adadf-9ea0-420b-805e-e15186b4c7b7" containerID="3dc23bcf6da069007b4516c44f55edcc5617ea96c0f1fecc06058e6c361dbd58" exitCode=0 Feb 16 12:24:35 crc kubenswrapper[4949]: I0216 12:24:35.419884 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mwpxx" event={"ID":"c67adadf-9ea0-420b-805e-e15186b4c7b7","Type":"ContainerDied","Data":"3dc23bcf6da069007b4516c44f55edcc5617ea96c0f1fecc06058e6c361dbd58"} Feb 16 12:24:36 crc kubenswrapper[4949]: I0216 12:24:36.433597 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mwpxx" event={"ID":"c67adadf-9ea0-420b-805e-e15186b4c7b7","Type":"ContainerStarted","Data":"fc4402095a6a2ee0286ad92bfc67645efd40f50b7d12ab1a3c5f20fa9be9bd48"} Feb 16 12:24:36 crc kubenswrapper[4949]: I0216 12:24:36.456714 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-mwpxx" podStartSLOduration=3.978672292 podStartE2EDuration="7.45669753s" podCreationTimestamp="2026-02-16 12:24:29 +0000 UTC" firstStartedPulling="2026-02-16 12:24:32.367822731 +0000 UTC m=+4661.997156896" lastFinishedPulling="2026-02-16 12:24:35.845847959 +0000 UTC m=+4665.475182134" observedRunningTime="2026-02-16 12:24:36.447145017 +0000 UTC m=+4666.076479192" watchObservedRunningTime="2026-02-16 12:24:36.45669753 +0000 UTC m=+4666.086031695" Feb 16 12:24:37 crc kubenswrapper[4949]: E0216 12:24:37.244631 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:24:40 crc kubenswrapper[4949]: I0216 12:24:40.606891 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-mwpxx" Feb 16 12:24:40 crc kubenswrapper[4949]: I0216 12:24:40.608288 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-mwpxx" Feb 16 12:24:40 crc kubenswrapper[4949]: I0216 12:24:40.666992 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-mwpxx" Feb 16 12:24:41 crc kubenswrapper[4949]: I0216 12:24:41.664138 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-mwpxx" Feb 16 12:24:41 crc kubenswrapper[4949]: I0216 12:24:41.718214 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-mwpxx"] Feb 16 12:24:43 crc kubenswrapper[4949]: I0216 12:24:43.236905 4949 scope.go:117] "RemoveContainer" containerID="aface388741cbe2a3499be5b34861516246186ab0c0bbc522e7ee0c88862d5e3" Feb 16 12:24:43 crc kubenswrapper[4949]: E0216 12:24:43.237540 4949 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:24:43 crc kubenswrapper[4949]: I0216 12:24:43.531906 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-mwpxx" podUID="c67adadf-9ea0-420b-805e-e15186b4c7b7" containerName="registry-server" containerID="cri-o://fc4402095a6a2ee0286ad92bfc67645efd40f50b7d12ab1a3c5f20fa9be9bd48" gracePeriod=2 Feb 16 12:24:44 crc kubenswrapper[4949]: I0216 12:24:44.084325 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-mwpxx" Feb 16 12:24:44 crc kubenswrapper[4949]: I0216 12:24:44.200992 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c67adadf-9ea0-420b-805e-e15186b4c7b7-utilities\") pod \"c67adadf-9ea0-420b-805e-e15186b4c7b7\" (UID: \"c67adadf-9ea0-420b-805e-e15186b4c7b7\") " Feb 16 12:24:44 crc kubenswrapper[4949]: I0216 12:24:44.201040 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c67adadf-9ea0-420b-805e-e15186b4c7b7-catalog-content\") pod \"c67adadf-9ea0-420b-805e-e15186b4c7b7\" (UID: \"c67adadf-9ea0-420b-805e-e15186b4c7b7\") " Feb 16 12:24:44 crc kubenswrapper[4949]: I0216 12:24:44.201104 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tpcs9\" (UniqueName: \"kubernetes.io/projected/c67adadf-9ea0-420b-805e-e15186b4c7b7-kube-api-access-tpcs9\") pod \"c67adadf-9ea0-420b-805e-e15186b4c7b7\" (UID: \"c67adadf-9ea0-420b-805e-e15186b4c7b7\") " Feb 16 12:24:44 crc kubenswrapper[4949]: I0216 12:24:44.201815 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c67adadf-9ea0-420b-805e-e15186b4c7b7-utilities" (OuterVolumeSpecName: "utilities") pod "c67adadf-9ea0-420b-805e-e15186b4c7b7" (UID: "c67adadf-9ea0-420b-805e-e15186b4c7b7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 12:24:44 crc kubenswrapper[4949]: I0216 12:24:44.206320 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c67adadf-9ea0-420b-805e-e15186b4c7b7-kube-api-access-tpcs9" (OuterVolumeSpecName: "kube-api-access-tpcs9") pod "c67adadf-9ea0-420b-805e-e15186b4c7b7" (UID: "c67adadf-9ea0-420b-805e-e15186b4c7b7"). InnerVolumeSpecName "kube-api-access-tpcs9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 12:24:44 crc kubenswrapper[4949]: E0216 12:24:44.248758 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:24:44 crc kubenswrapper[4949]: I0216 12:24:44.255571 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c67adadf-9ea0-420b-805e-e15186b4c7b7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c67adadf-9ea0-420b-805e-e15186b4c7b7" (UID: "c67adadf-9ea0-420b-805e-e15186b4c7b7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 12:24:44 crc kubenswrapper[4949]: I0216 12:24:44.305950 4949 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c67adadf-9ea0-420b-805e-e15186b4c7b7-utilities\") on node \"crc\" DevicePath \"\"" Feb 16 12:24:44 crc kubenswrapper[4949]: I0216 12:24:44.306009 4949 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c67adadf-9ea0-420b-805e-e15186b4c7b7-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 16 12:24:44 crc kubenswrapper[4949]: I0216 12:24:44.306038 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tpcs9\" (UniqueName: \"kubernetes.io/projected/c67adadf-9ea0-420b-805e-e15186b4c7b7-kube-api-access-tpcs9\") on node \"crc\" DevicePath \"\"" Feb 16 12:24:44 crc kubenswrapper[4949]: I0216 12:24:44.360887 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-pc7bx" Feb 16 12:24:44 crc kubenswrapper[4949]: I0216 12:24:44.425650 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-pc7bx"] Feb 16 12:24:44 crc kubenswrapper[4949]: I0216 12:24:44.547564 4949 generic.go:334] "Generic (PLEG): container finished" podID="c67adadf-9ea0-420b-805e-e15186b4c7b7" containerID="fc4402095a6a2ee0286ad92bfc67645efd40f50b7d12ab1a3c5f20fa9be9bd48" exitCode=0 Feb 16 12:24:44 crc kubenswrapper[4949]: I0216 12:24:44.547752 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-pc7bx" podUID="25f25b13-00e3-44f4-963d-cd39478a74f7" containerName="registry-server" containerID="cri-o://04888b79ba59c15f603a369e59baa15add45c26b7dde5d49632803d87517a4bf" gracePeriod=2 Feb 16 12:24:44 crc kubenswrapper[4949]: I0216 12:24:44.548017 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-mwpxx" Feb 16 12:24:44 crc kubenswrapper[4949]: I0216 12:24:44.548285 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mwpxx" event={"ID":"c67adadf-9ea0-420b-805e-e15186b4c7b7","Type":"ContainerDied","Data":"fc4402095a6a2ee0286ad92bfc67645efd40f50b7d12ab1a3c5f20fa9be9bd48"} Feb 16 12:24:44 crc kubenswrapper[4949]: I0216 12:24:44.548347 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mwpxx" event={"ID":"c67adadf-9ea0-420b-805e-e15186b4c7b7","Type":"ContainerDied","Data":"fb10492951bb1f86a45c0c708e61fdee354c0d539e4d581946aaa9ed0a7f8381"} Feb 16 12:24:44 crc kubenswrapper[4949]: I0216 12:24:44.548366 4949 scope.go:117] "RemoveContainer" containerID="fc4402095a6a2ee0286ad92bfc67645efd40f50b7d12ab1a3c5f20fa9be9bd48" Feb 16 12:24:44 crc kubenswrapper[4949]: I0216 12:24:44.591350 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-mwpxx"] Feb 16 12:24:44 crc kubenswrapper[4949]: I0216 12:24:44.592595 4949 scope.go:117] "RemoveContainer" containerID="3dc23bcf6da069007b4516c44f55edcc5617ea96c0f1fecc06058e6c361dbd58" Feb 16 12:24:44 crc kubenswrapper[4949]: I0216 12:24:44.600454 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-mwpxx"] Feb 16 12:24:44 crc kubenswrapper[4949]: I0216 12:24:44.637321 4949 scope.go:117] "RemoveContainer" containerID="4a14edc6c341abdaefc6fe12109a048103afea6ef0d05877ada3d210f8a59bd5" Feb 16 12:24:44 crc kubenswrapper[4949]: I0216 12:24:44.827121 4949 scope.go:117] "RemoveContainer" containerID="fc4402095a6a2ee0286ad92bfc67645efd40f50b7d12ab1a3c5f20fa9be9bd48" Feb 16 12:24:44 crc kubenswrapper[4949]: E0216 12:24:44.827602 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fc4402095a6a2ee0286ad92bfc67645efd40f50b7d12ab1a3c5f20fa9be9bd48\": container with ID starting with fc4402095a6a2ee0286ad92bfc67645efd40f50b7d12ab1a3c5f20fa9be9bd48 not found: ID does not exist" containerID="fc4402095a6a2ee0286ad92bfc67645efd40f50b7d12ab1a3c5f20fa9be9bd48" Feb 16 12:24:44 crc kubenswrapper[4949]: I0216 12:24:44.827632 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fc4402095a6a2ee0286ad92bfc67645efd40f50b7d12ab1a3c5f20fa9be9bd48"} err="failed to get container status \"fc4402095a6a2ee0286ad92bfc67645efd40f50b7d12ab1a3c5f20fa9be9bd48\": rpc error: code = NotFound desc = could not find container \"fc4402095a6a2ee0286ad92bfc67645efd40f50b7d12ab1a3c5f20fa9be9bd48\": container with ID starting with fc4402095a6a2ee0286ad92bfc67645efd40f50b7d12ab1a3c5f20fa9be9bd48 not found: ID does not exist" Feb 16 12:24:44 crc kubenswrapper[4949]: I0216 12:24:44.827655 4949 scope.go:117] "RemoveContainer" containerID="3dc23bcf6da069007b4516c44f55edcc5617ea96c0f1fecc06058e6c361dbd58" Feb 16 12:24:44 crc kubenswrapper[4949]: E0216 12:24:44.827899 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3dc23bcf6da069007b4516c44f55edcc5617ea96c0f1fecc06058e6c361dbd58\": container with ID starting with 3dc23bcf6da069007b4516c44f55edcc5617ea96c0f1fecc06058e6c361dbd58 not found: ID does not exist" containerID="3dc23bcf6da069007b4516c44f55edcc5617ea96c0f1fecc06058e6c361dbd58" Feb 16 12:24:44 crc kubenswrapper[4949]: I0216 12:24:44.827930 4949 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3dc23bcf6da069007b4516c44f55edcc5617ea96c0f1fecc06058e6c361dbd58"} err="failed to get container status \"3dc23bcf6da069007b4516c44f55edcc5617ea96c0f1fecc06058e6c361dbd58\": rpc error: code = NotFound desc = could not find container \"3dc23bcf6da069007b4516c44f55edcc5617ea96c0f1fecc06058e6c361dbd58\": container with ID starting with 3dc23bcf6da069007b4516c44f55edcc5617ea96c0f1fecc06058e6c361dbd58 not found: ID does not exist" Feb 16 12:24:44 crc kubenswrapper[4949]: I0216 12:24:44.827948 4949 scope.go:117] "RemoveContainer" containerID="4a14edc6c341abdaefc6fe12109a048103afea6ef0d05877ada3d210f8a59bd5" Feb 16 12:24:44 crc kubenswrapper[4949]: E0216 12:24:44.828190 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4a14edc6c341abdaefc6fe12109a048103afea6ef0d05877ada3d210f8a59bd5\": container with ID starting with 4a14edc6c341abdaefc6fe12109a048103afea6ef0d05877ada3d210f8a59bd5 not found: ID does not exist" containerID="4a14edc6c341abdaefc6fe12109a048103afea6ef0d05877ada3d210f8a59bd5" Feb 16 12:24:44 crc kubenswrapper[4949]: I0216 12:24:44.828215 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4a14edc6c341abdaefc6fe12109a048103afea6ef0d05877ada3d210f8a59bd5"} err="failed to get container status \"4a14edc6c341abdaefc6fe12109a048103afea6ef0d05877ada3d210f8a59bd5\": rpc error: code = NotFound desc = could not find container \"4a14edc6c341abdaefc6fe12109a048103afea6ef0d05877ada3d210f8a59bd5\": container with ID starting with 4a14edc6c341abdaefc6fe12109a048103afea6ef0d05877ada3d210f8a59bd5 not found: ID does not exist" Feb 16 12:24:45 crc kubenswrapper[4949]: I0216 12:24:45.145542 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pc7bx" Feb 16 12:24:45 crc kubenswrapper[4949]: I0216 12:24:45.230835 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gs4nr\" (UniqueName: \"kubernetes.io/projected/25f25b13-00e3-44f4-963d-cd39478a74f7-kube-api-access-gs4nr\") pod \"25f25b13-00e3-44f4-963d-cd39478a74f7\" (UID: \"25f25b13-00e3-44f4-963d-cd39478a74f7\") " Feb 16 12:24:45 crc kubenswrapper[4949]: I0216 12:24:45.231107 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/25f25b13-00e3-44f4-963d-cd39478a74f7-catalog-content\") pod \"25f25b13-00e3-44f4-963d-cd39478a74f7\" (UID: \"25f25b13-00e3-44f4-963d-cd39478a74f7\") " Feb 16 12:24:45 crc kubenswrapper[4949]: I0216 12:24:45.231368 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/25f25b13-00e3-44f4-963d-cd39478a74f7-utilities\") pod \"25f25b13-00e3-44f4-963d-cd39478a74f7\" (UID: \"25f25b13-00e3-44f4-963d-cd39478a74f7\") " Feb 16 12:24:45 crc kubenswrapper[4949]: I0216 12:24:45.232284 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/25f25b13-00e3-44f4-963d-cd39478a74f7-utilities" (OuterVolumeSpecName: "utilities") pod "25f25b13-00e3-44f4-963d-cd39478a74f7" (UID: "25f25b13-00e3-44f4-963d-cd39478a74f7"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 12:24:45 crc kubenswrapper[4949]: I0216 12:24:45.245163 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25f25b13-00e3-44f4-963d-cd39478a74f7-kube-api-access-gs4nr" (OuterVolumeSpecName: "kube-api-access-gs4nr") pod "25f25b13-00e3-44f4-963d-cd39478a74f7" (UID: "25f25b13-00e3-44f4-963d-cd39478a74f7"). InnerVolumeSpecName "kube-api-access-gs4nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 12:24:45 crc kubenswrapper[4949]: I0216 12:24:45.251127 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c67adadf-9ea0-420b-805e-e15186b4c7b7" path="/var/lib/kubelet/pods/c67adadf-9ea0-420b-805e-e15186b4c7b7/volumes" Feb 16 12:24:45 crc kubenswrapper[4949]: I0216 12:24:45.294468 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/25f25b13-00e3-44f4-963d-cd39478a74f7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "25f25b13-00e3-44f4-963d-cd39478a74f7" (UID: "25f25b13-00e3-44f4-963d-cd39478a74f7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 12:24:45 crc kubenswrapper[4949]: I0216 12:24:45.333786 4949 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/25f25b13-00e3-44f4-963d-cd39478a74f7-utilities\") on node \"crc\" DevicePath \"\"" Feb 16 12:24:45 crc kubenswrapper[4949]: I0216 12:24:45.333814 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gs4nr\" (UniqueName: \"kubernetes.io/projected/25f25b13-00e3-44f4-963d-cd39478a74f7-kube-api-access-gs4nr\") on node \"crc\" DevicePath \"\"" Feb 16 12:24:45 crc kubenswrapper[4949]: I0216 12:24:45.333823 4949 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/25f25b13-00e3-44f4-963d-cd39478a74f7-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 16 12:24:45 crc kubenswrapper[4949]: I0216 12:24:45.562257 4949 generic.go:334] "Generic (PLEG): container finished" podID="25f25b13-00e3-44f4-963d-cd39478a74f7" containerID="04888b79ba59c15f603a369e59baa15add45c26b7dde5d49632803d87517a4bf" exitCode=0 Feb 16 12:24:45 crc kubenswrapper[4949]: I0216 12:24:45.562311 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pc7bx" event={"ID":"25f25b13-00e3-44f4-963d-cd39478a74f7","Type":"ContainerDied","Data":"04888b79ba59c15f603a369e59baa15add45c26b7dde5d49632803d87517a4bf"} Feb 16 12:24:45 crc kubenswrapper[4949]: I0216 12:24:45.562330 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-pc7bx" Feb 16 12:24:45 crc kubenswrapper[4949]: I0216 12:24:45.562357 4949 scope.go:117] "RemoveContainer" containerID="04888b79ba59c15f603a369e59baa15add45c26b7dde5d49632803d87517a4bf" Feb 16 12:24:45 crc kubenswrapper[4949]: I0216 12:24:45.562343 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pc7bx" event={"ID":"25f25b13-00e3-44f4-963d-cd39478a74f7","Type":"ContainerDied","Data":"e9a69ed8138e97bf5fcc454b1ce20b13a586be445f80a893d6d3e197bb7543eb"} Feb 16 12:24:45 crc kubenswrapper[4949]: I0216 12:24:45.599139 4949 scope.go:117] "RemoveContainer" containerID="0928f02d1e53feec9cdecb78cc9626de8ee5ec7b4c5edd68c56a0c7e4b942448" Feb 16 12:24:45 crc kubenswrapper[4949]: I0216 12:24:45.601524 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-pc7bx"] Feb 16 12:24:45 crc kubenswrapper[4949]: I0216 12:24:45.613396 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-pc7bx"] Feb 16 12:24:45 crc kubenswrapper[4949]: I0216 12:24:45.627347 4949 scope.go:117] "RemoveContainer" containerID="d9bdc2f451760f2eab0b5f6b326681adb4c4251b0d08e765ef4a51cde16f1a7c" Feb 16 12:24:45 crc kubenswrapper[4949]: I0216 12:24:45.670041 4949 scope.go:117] "RemoveContainer" containerID="04888b79ba59c15f603a369e59baa15add45c26b7dde5d49632803d87517a4bf" Feb 16 12:24:45 crc kubenswrapper[4949]: E0216 12:24:45.671146 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"04888b79ba59c15f603a369e59baa15add45c26b7dde5d49632803d87517a4bf\": container with ID starting with 04888b79ba59c15f603a369e59baa15add45c26b7dde5d49632803d87517a4bf not found: ID does not exist" containerID="04888b79ba59c15f603a369e59baa15add45c26b7dde5d49632803d87517a4bf" Feb 16 12:24:45 crc kubenswrapper[4949]: I0216 12:24:45.671228 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"04888b79ba59c15f603a369e59baa15add45c26b7dde5d49632803d87517a4bf"} err="failed to get container status \"04888b79ba59c15f603a369e59baa15add45c26b7dde5d49632803d87517a4bf\": rpc error: code = NotFound desc = could not find container \"04888b79ba59c15f603a369e59baa15add45c26b7dde5d49632803d87517a4bf\": container with ID starting with 04888b79ba59c15f603a369e59baa15add45c26b7dde5d49632803d87517a4bf not found: ID does not exist" Feb 16 12:24:45 crc kubenswrapper[4949]: I0216 12:24:45.671253 4949 scope.go:117] "RemoveContainer" containerID="0928f02d1e53feec9cdecb78cc9626de8ee5ec7b4c5edd68c56a0c7e4b942448" Feb 16 12:24:45 crc kubenswrapper[4949]: E0216 12:24:45.671547 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0928f02d1e53feec9cdecb78cc9626de8ee5ec7b4c5edd68c56a0c7e4b942448\": container with ID starting with 0928f02d1e53feec9cdecb78cc9626de8ee5ec7b4c5edd68c56a0c7e4b942448 not found: ID does not exist" containerID="0928f02d1e53feec9cdecb78cc9626de8ee5ec7b4c5edd68c56a0c7e4b942448" Feb 16 12:24:45 crc kubenswrapper[4949]: I0216 12:24:45.671611 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0928f02d1e53feec9cdecb78cc9626de8ee5ec7b4c5edd68c56a0c7e4b942448"} err="failed to get container status \"0928f02d1e53feec9cdecb78cc9626de8ee5ec7b4c5edd68c56a0c7e4b942448\": rpc error: code = NotFound desc = could not find 
container \"0928f02d1e53feec9cdecb78cc9626de8ee5ec7b4c5edd68c56a0c7e4b942448\": container with ID starting with 0928f02d1e53feec9cdecb78cc9626de8ee5ec7b4c5edd68c56a0c7e4b942448 not found: ID does not exist" Feb 16 12:24:45 crc kubenswrapper[4949]: I0216 12:24:45.671642 4949 scope.go:117] "RemoveContainer" containerID="d9bdc2f451760f2eab0b5f6b326681adb4c4251b0d08e765ef4a51cde16f1a7c" Feb 16 12:24:45 crc kubenswrapper[4949]: E0216 12:24:45.672035 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d9bdc2f451760f2eab0b5f6b326681adb4c4251b0d08e765ef4a51cde16f1a7c\": container with ID starting with d9bdc2f451760f2eab0b5f6b326681adb4c4251b0d08e765ef4a51cde16f1a7c not found: ID does not exist" containerID="d9bdc2f451760f2eab0b5f6b326681adb4c4251b0d08e765ef4a51cde16f1a7c" Feb 16 12:24:45 crc kubenswrapper[4949]: I0216 12:24:45.672078 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d9bdc2f451760f2eab0b5f6b326681adb4c4251b0d08e765ef4a51cde16f1a7c"} err="failed to get container status \"d9bdc2f451760f2eab0b5f6b326681adb4c4251b0d08e765ef4a51cde16f1a7c\": rpc error: code = NotFound desc = could not find container \"d9bdc2f451760f2eab0b5f6b326681adb4c4251b0d08e765ef4a51cde16f1a7c\": container with ID starting with d9bdc2f451760f2eab0b5f6b326681adb4c4251b0d08e765ef4a51cde16f1a7c not found: ID does not exist" Feb 16 12:24:47 crc kubenswrapper[4949]: I0216 12:24:47.250776 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25f25b13-00e3-44f4-963d-cd39478a74f7" path="/var/lib/kubelet/pods/25f25b13-00e3-44f4-963d-cd39478a74f7/volumes" Feb 16 12:24:52 crc kubenswrapper[4949]: E0216 12:24:52.238743 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:24:55 crc kubenswrapper[4949]: E0216 12:24:55.239399 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:24:58 crc kubenswrapper[4949]: I0216 12:24:58.235625 4949 scope.go:117] "RemoveContainer" containerID="aface388741cbe2a3499be5b34861516246186ab0c0bbc522e7ee0c88862d5e3" Feb 16 12:24:58 crc kubenswrapper[4949]: E0216 12:24:58.236545 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:25:04 crc kubenswrapper[4949]: E0216 12:25:04.239762 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" 
podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:25:08 crc kubenswrapper[4949]: E0216 12:25:08.239467 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:25:11 crc kubenswrapper[4949]: I0216 12:25:11.245991 4949 scope.go:117] "RemoveContainer" containerID="aface388741cbe2a3499be5b34861516246186ab0c0bbc522e7ee0c88862d5e3" Feb 16 12:25:11 crc kubenswrapper[4949]: I0216 12:25:11.897843 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" event={"ID":"39ca5ab7-457c-4404-a3eb-f6acce74843b","Type":"ContainerStarted","Data":"1aa2bd076f2e7ef7c43059413de4be403a813a7fa5386d2acf3c90871bdf13ea"} Feb 16 12:25:15 crc kubenswrapper[4949]: E0216 12:25:15.238647 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:25:23 crc kubenswrapper[4949]: E0216 12:25:23.238726 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:25:28 crc kubenswrapper[4949]: E0216 12:25:28.237040 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:25:34 crc kubenswrapper[4949]: E0216 12:25:34.238756 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:25:42 crc kubenswrapper[4949]: E0216 12:25:42.237456 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:25:48 crc kubenswrapper[4949]: E0216 12:25:48.237371 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:25:54 crc kubenswrapper[4949]: E0216 12:25:54.237452 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:25:59 crc kubenswrapper[4949]: E0216 12:25:59.237001 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:26:06 crc kubenswrapper[4949]: E0216 12:26:06.239034 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:26:12 crc kubenswrapper[4949]: E0216 12:26:12.239084 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:26:20 crc kubenswrapper[4949]: E0216 12:26:20.237742 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:26:27 crc kubenswrapper[4949]: E0216 12:26:27.239431 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:26:31 crc kubenswrapper[4949]: E0216 12:26:31.257334 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:26:42 crc kubenswrapper[4949]: E0216 12:26:42.237653 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:26:42 crc kubenswrapper[4949]: E0216 12:26:42.238608 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:26:56 crc kubenswrapper[4949]: E0216 12:26:56.237580 4949 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:26:56 crc kubenswrapper[4949]: I0216 12:26:56.237873 4949 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 16 12:26:56 crc kubenswrapper[4949]: E0216 12:26:56.332614 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 16 12:26:56 crc kubenswrapper[4949]: E0216 12:26:56.332679 4949 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 16 12:26:56 crc kubenswrapper[4949]: E0216 12:26:56.332827 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n674h5dh7bh65bhcch65chc4h547h5d4h5c7h5dch5c8h74hb9h5f4hd8h79h7h59bh559h56bh9bhbch67bh68bh575h5cbh658h5bch7bhcch5d9q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8k7p7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 
/var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(c69d7379-6f2b-45ae-8972-71e223a337a8): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError"
Feb 16 12:26:56 crc kubenswrapper[4949]: E0216 12:26:56.334027 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 12:27:08 crc kubenswrapper[4949]: E0216 12:27:08.240349 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 12:27:11 crc kubenswrapper[4949]: E0216 12:27:11.356942 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested"
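
The two failure loops that dominate this section bottom out here: CRI-O cannot even resolve the manifest because the current-tested tag has been deleted from quay.rdoproject.org, so each fresh pull attempt surfaces as ErrImagePull and every retry interval as ImagePullBackOff. The same waiting reason is exposed on the pod object itself; a minimal client-go sketch that reads it (pod name and namespace are taken from this log, the kubeconfig location is an assumption):

    package main

    import (
    	"context"
    	"fmt"

    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    	"k8s.io/client-go/tools/clientcmd"
    )

    func main() {
    	// Assumption: the default kubeconfig path (~/.kube/config); adjust for your host.
    	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
    	if err != nil {
    		panic(err)
    	}
    	client := kubernetes.NewForConfigOrDie(cfg)

    	// "ceilometer-0" in namespace "openstack" is the pod failing in the log above.
    	pod, err := client.CoreV1().Pods("openstack").Get(context.TODO(), "ceilometer-0", metav1.GetOptions{})
    	if err != nil {
    		panic(err)
    	}
    	for _, cs := range pod.Status.ContainerStatuses {
    		if w := cs.State.Waiting; w != nil {
    			// For this log the reason alternates between ErrImagePull and ImagePullBackOff.
    			fmt.Printf("%s: %s: %s\n", cs.Name, w.Reason, w.Message)
    		}
    	}
    }

The pod's Events show the same thing, but the fix is outside the cluster entirely: the tag has to be restored in the registry or the image reference updated.
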
Feb 16 12:27:11 crc kubenswrapper[4949]: E0216 12:27:11.357730 4949 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested"
Feb 16 12:27:11 crc kubenswrapper[4949]: E0216 12:27:11.357920 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ksbml,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-5lgds_openstack(a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError"
Feb 16 12:27:11 crc kubenswrapper[4949]: E0216 12:27:11.359144 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:27:20 crc kubenswrapper[4949]: E0216 12:27:20.238411 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:27:24 crc kubenswrapper[4949]: E0216 12:27:24.238486 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:27:34 crc kubenswrapper[4949]: E0216 12:27:34.238557 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:27:34 crc kubenswrapper[4949]: I0216 12:27:34.550648 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 16 12:27:34 crc kubenswrapper[4949]: I0216 12:27:34.550728 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 16 12:27:35 crc kubenswrapper[4949]: E0216 12:27:35.241982 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:27:47 crc kubenswrapper[4949]: E0216 12:27:47.237817 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:27:50 crc kubenswrapper[4949]: E0216 12:27:50.245619 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:28:02 crc kubenswrapper[4949]: E0216 12:28:02.237256 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:28:02 crc kubenswrapper[4949]: E0216 12:28:02.237660 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:28:04 crc kubenswrapper[4949]: I0216 12:28:04.550670 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 16 12:28:04 crc kubenswrapper[4949]: I0216 12:28:04.551318 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 16 12:28:10 crc kubenswrapper[4949]: I0216 12:28:10.076668 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-c7g6c"] Feb 16 12:28:10 crc kubenswrapper[4949]: E0216 12:28:10.077916 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c67adadf-9ea0-420b-805e-e15186b4c7b7" containerName="registry-server" Feb 16 12:28:10 crc kubenswrapper[4949]: I0216 12:28:10.077938 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="c67adadf-9ea0-420b-805e-e15186b4c7b7" containerName="registry-server" Feb 16 12:28:10 crc kubenswrapper[4949]: E0216 12:28:10.077975 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25f25b13-00e3-44f4-963d-cd39478a74f7" containerName="registry-server" Feb 16 12:28:10 crc kubenswrapper[4949]: I0216 12:28:10.077987 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="25f25b13-00e3-44f4-963d-cd39478a74f7" containerName="registry-server" Feb 16 12:28:10 crc kubenswrapper[4949]: E0216 12:28:10.078015 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25f25b13-00e3-44f4-963d-cd39478a74f7" containerName="extract-content" Feb 16 12:28:10 crc kubenswrapper[4949]: I0216 12:28:10.078027 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="25f25b13-00e3-44f4-963d-cd39478a74f7" containerName="extract-content" Feb 16 12:28:10 crc kubenswrapper[4949]: E0216 12:28:10.078066 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c67adadf-9ea0-420b-805e-e15186b4c7b7" containerName="extract-content" Feb 16 12:28:10 crc kubenswrapper[4949]: I0216 12:28:10.078076 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="c67adadf-9ea0-420b-805e-e15186b4c7b7" containerName="extract-content" Feb 16 12:28:10 crc kubenswrapper[4949]: E0216 12:28:10.078097 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25f25b13-00e3-44f4-963d-cd39478a74f7" containerName="extract-utilities" Feb 16 12:28:10 crc kubenswrapper[4949]: I0216 12:28:10.078108 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="25f25b13-00e3-44f4-963d-cd39478a74f7" containerName="extract-utilities" Feb 16 12:28:10 crc kubenswrapper[4949]: E0216 
12:28:10.078129 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c67adadf-9ea0-420b-805e-e15186b4c7b7" containerName="extract-utilities" Feb 16 12:28:10 crc kubenswrapper[4949]: I0216 12:28:10.078139 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="c67adadf-9ea0-420b-805e-e15186b4c7b7" containerName="extract-utilities" Feb 16 12:28:10 crc kubenswrapper[4949]: I0216 12:28:10.078537 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="25f25b13-00e3-44f4-963d-cd39478a74f7" containerName="registry-server" Feb 16 12:28:10 crc kubenswrapper[4949]: I0216 12:28:10.078568 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="c67adadf-9ea0-420b-805e-e15186b4c7b7" containerName="registry-server" Feb 16 12:28:10 crc kubenswrapper[4949]: I0216 12:28:10.079901 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-c7g6c" Feb 16 12:28:10 crc kubenswrapper[4949]: I0216 12:28:10.082685 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Feb 16 12:28:10 crc kubenswrapper[4949]: I0216 12:28:10.083689 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Feb 16 12:28:10 crc kubenswrapper[4949]: I0216 12:28:10.083920 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Feb 16 12:28:10 crc kubenswrapper[4949]: I0216 12:28:10.085744 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-g89vn" Feb 16 12:28:10 crc kubenswrapper[4949]: I0216 12:28:10.095104 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-c7g6c"] Feb 16 12:28:10 crc kubenswrapper[4949]: I0216 12:28:10.172675 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d5a39a2b-f779-4ac6-86ee-db48d7305088-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-c7g6c\" (UID: \"d5a39a2b-f779-4ac6-86ee-db48d7305088\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-c7g6c" Feb 16 12:28:10 crc kubenswrapper[4949]: I0216 12:28:10.172821 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d5a39a2b-f779-4ac6-86ee-db48d7305088-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-c7g6c\" (UID: \"d5a39a2b-f779-4ac6-86ee-db48d7305088\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-c7g6c" Feb 16 12:28:10 crc kubenswrapper[4949]: I0216 12:28:10.172897 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f2hlw\" (UniqueName: \"kubernetes.io/projected/d5a39a2b-f779-4ac6-86ee-db48d7305088-kube-api-access-f2hlw\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-c7g6c\" (UID: \"d5a39a2b-f779-4ac6-86ee-db48d7305088\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-c7g6c" Feb 16 12:28:10 crc kubenswrapper[4949]: I0216 12:28:10.275923 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d5a39a2b-f779-4ac6-86ee-db48d7305088-inventory\") pod 
\"download-cache-edpm-deployment-openstack-edpm-ipam-c7g6c\" (UID: \"d5a39a2b-f779-4ac6-86ee-db48d7305088\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-c7g6c" Feb 16 12:28:10 crc kubenswrapper[4949]: I0216 12:28:10.275995 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f2hlw\" (UniqueName: \"kubernetes.io/projected/d5a39a2b-f779-4ac6-86ee-db48d7305088-kube-api-access-f2hlw\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-c7g6c\" (UID: \"d5a39a2b-f779-4ac6-86ee-db48d7305088\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-c7g6c" Feb 16 12:28:10 crc kubenswrapper[4949]: I0216 12:28:10.276038 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d5a39a2b-f779-4ac6-86ee-db48d7305088-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-c7g6c\" (UID: \"d5a39a2b-f779-4ac6-86ee-db48d7305088\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-c7g6c" Feb 16 12:28:10 crc kubenswrapper[4949]: I0216 12:28:10.283467 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d5a39a2b-f779-4ac6-86ee-db48d7305088-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-c7g6c\" (UID: \"d5a39a2b-f779-4ac6-86ee-db48d7305088\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-c7g6c" Feb 16 12:28:10 crc kubenswrapper[4949]: I0216 12:28:10.283693 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d5a39a2b-f779-4ac6-86ee-db48d7305088-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-c7g6c\" (UID: \"d5a39a2b-f779-4ac6-86ee-db48d7305088\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-c7g6c" Feb 16 12:28:10 crc kubenswrapper[4949]: I0216 12:28:10.299093 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f2hlw\" (UniqueName: \"kubernetes.io/projected/d5a39a2b-f779-4ac6-86ee-db48d7305088-kube-api-access-f2hlw\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-c7g6c\" (UID: \"d5a39a2b-f779-4ac6-86ee-db48d7305088\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-c7g6c" Feb 16 12:28:10 crc kubenswrapper[4949]: I0216 12:28:10.415706 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-c7g6c"
Feb 16 12:28:10 crc kubenswrapper[4949]: I0216 12:28:10.982810 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-c7g6c"]
Feb 16 12:28:11 crc kubenswrapper[4949]: I0216 12:28:11.044149 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-c7g6c" event={"ID":"d5a39a2b-f779-4ac6-86ee-db48d7305088","Type":"ContainerStarted","Data":"82424e392df9086a288400c3c5860185bd5dd3383a57df5195fa16bf7d08faca"}
Feb 16 12:28:12 crc kubenswrapper[4949]: I0216 12:28:12.056330 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-c7g6c" event={"ID":"d5a39a2b-f779-4ac6-86ee-db48d7305088","Type":"ContainerStarted","Data":"483ac8dba97ce234365beb879f8461c9932e1dac687678a15972da37387f3143"}
Feb 16 12:28:12 crc kubenswrapper[4949]: I0216 12:28:12.082547 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-c7g6c" podStartSLOduration=1.479927872 podStartE2EDuration="2.082528988s" podCreationTimestamp="2026-02-16 12:28:10 +0000 UTC" firstStartedPulling="2026-02-16 12:28:10.993951474 +0000 UTC m=+4880.623285639" lastFinishedPulling="2026-02-16 12:28:11.59655257 +0000 UTC m=+4881.225886755" observedRunningTime="2026-02-16 12:28:12.076359952 +0000 UTC m=+4881.705694127" watchObservedRunningTime="2026-02-16 12:28:12.082528988 +0000 UTC m=+4881.711863153"
Feb 16 12:28:16 crc kubenswrapper[4949]: E0216 12:28:16.238694 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 12:28:17 crc kubenswrapper[4949]: E0216 12:28:17.237941 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 12:28:31 crc kubenswrapper[4949]: E0216 12:28:31.239517 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 12:28:31 crc kubenswrapper[4949]: E0216 12:28:31.257494 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
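
The startup-latency record above is internally consistent and shows how the tracker splits out image-pull time: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp (12:28:12.082528988 - 12:28:10 = 2.082528988s), while podStartSLOduration subtracts the pull window measured on the monotonic clock (m=+4881.225886755 - m=+4880.623285639 = 0.602601116s), giving 2.082528988 - 0.602601116 = 1.479927872s, exactly the value logged.
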
Feb 16 12:28:34 crc kubenswrapper[4949]: I0216 12:28:34.550666 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Feb 16 12:28:34 crc kubenswrapper[4949]: I0216 12:28:34.551237 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Feb 16 12:28:34 crc kubenswrapper[4949]: I0216 12:28:34.551282 4949 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-26lss"
Feb 16 12:28:34 crc kubenswrapper[4949]: I0216 12:28:34.552059 4949 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1aa2bd076f2e7ef7c43059413de4be403a813a7fa5386d2acf3c90871bdf13ea"} pod="openshift-machine-config-operator/machine-config-daemon-26lss" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Feb 16 12:28:34 crc kubenswrapper[4949]: I0216 12:28:34.552111 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" containerID="cri-o://1aa2bd076f2e7ef7c43059413de4be403a813a7fa5386d2acf3c90871bdf13ea" gracePeriod=600
Feb 16 12:28:35 crc kubenswrapper[4949]: I0216 12:28:35.371894 4949 generic.go:334] "Generic (PLEG): container finished" podID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerID="1aa2bd076f2e7ef7c43059413de4be403a813a7fa5386d2acf3c90871bdf13ea" exitCode=0
Feb 16 12:28:35 crc kubenswrapper[4949]: I0216 12:28:35.372113 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" event={"ID":"39ca5ab7-457c-4404-a3eb-f6acce74843b","Type":"ContainerDied","Data":"1aa2bd076f2e7ef7c43059413de4be403a813a7fa5386d2acf3c90871bdf13ea"}
Feb 16 12:28:35 crc kubenswrapper[4949]: I0216 12:28:35.372551 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" event={"ID":"39ca5ab7-457c-4404-a3eb-f6acce74843b","Type":"ContainerStarted","Data":"db5b7272f3abee92a666dd8fd36656846a70280da77aae2fa351ddb1613efa83"}
Feb 16 12:28:35 crc kubenswrapper[4949]: I0216 12:28:35.372581 4949 scope.go:117] "RemoveContainer" containerID="aface388741cbe2a3499be5b34861516246186ab0c0bbc522e7ee0c88862d5e3"
Feb 16 12:28:44 crc kubenswrapper[4949]: E0216 12:28:44.237166 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 12:28:44 crc kubenswrapper[4949]: E0216 12:28:44.237907 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
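
The restart above is the probe-driven kill path in full: the HTTP liveness probe against http://127.0.0.1:8798/health has been failing with "connection refused" (nothing listening on the port) since the entries at 12:27:34 and 12:28:04, and after the failure at 12:28:34 kubelet marks the container unhealthy, kills cri-o://1aa2bd07... with a 600s grace period, and starts replacement db5b7272.... A minimal sketch of the health endpoint such a probe expects (the address and path come from the log; the handler body is an assumption, not machine-config-daemon's actual check):

    package main

    import (
    	"log"
    	"net/http"
    )

    func main() {
    	// The probe in the log GETs http://127.0.0.1:8798/health; any 2xx answer
    	// passes, while "connection refused" (no listener at all) is a failure.
    	http.HandleFunc("/health", func(w http.ResponseWriter, r *http.Request) {
    		w.WriteHeader(http.StatusOK) // hypothetical check: always report healthy
    	})
    	log.Fatal(http.ListenAndServe("127.0.0.1:8798", nil))
    }

Because the failure mode is a refused connection rather than a non-2xx status, the process behind the daemon's health port has stopped serving entirely, which matches the repeated CrashLoopBackOff entries elsewhere in this log.
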
\"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:28:58 crc kubenswrapper[4949]: E0216 12:28:58.238872 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:29:06 crc kubenswrapper[4949]: E0216 12:29:06.239456 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:29:10 crc kubenswrapper[4949]: E0216 12:29:10.239292 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:29:20 crc kubenswrapper[4949]: E0216 12:29:20.241063 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:29:24 crc kubenswrapper[4949]: E0216 12:29:24.237326 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:29:33 crc kubenswrapper[4949]: E0216 12:29:33.239724 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:29:39 crc kubenswrapper[4949]: E0216 12:29:39.239991 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:29:45 crc kubenswrapper[4949]: E0216 12:29:45.239526 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:29:54 crc kubenswrapper[4949]: E0216 12:29:54.237508 4949 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:30:00 crc kubenswrapper[4949]: I0216 12:30:00.150434 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29520750-nl5q9"] Feb 16 12:30:00 crc kubenswrapper[4949]: I0216 12:30:00.153131 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29520750-nl5q9" Feb 16 12:30:00 crc kubenswrapper[4949]: I0216 12:30:00.156033 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Feb 16 12:30:00 crc kubenswrapper[4949]: I0216 12:30:00.156291 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Feb 16 12:30:00 crc kubenswrapper[4949]: I0216 12:30:00.176193 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29520750-nl5q9"] Feb 16 12:30:00 crc kubenswrapper[4949]: E0216 12:30:00.237459 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:30:00 crc kubenswrapper[4949]: I0216 12:30:00.338246 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d0efd1a1-ece5-469c-9262-137304b697a3-config-volume\") pod \"collect-profiles-29520750-nl5q9\" (UID: \"d0efd1a1-ece5-469c-9262-137304b697a3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520750-nl5q9" Feb 16 12:30:00 crc kubenswrapper[4949]: I0216 12:30:00.338388 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sp6th\" (UniqueName: \"kubernetes.io/projected/d0efd1a1-ece5-469c-9262-137304b697a3-kube-api-access-sp6th\") pod \"collect-profiles-29520750-nl5q9\" (UID: \"d0efd1a1-ece5-469c-9262-137304b697a3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520750-nl5q9" Feb 16 12:30:00 crc kubenswrapper[4949]: I0216 12:30:00.338565 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d0efd1a1-ece5-469c-9262-137304b697a3-secret-volume\") pod \"collect-profiles-29520750-nl5q9\" (UID: \"d0efd1a1-ece5-469c-9262-137304b697a3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520750-nl5q9" Feb 16 12:30:00 crc kubenswrapper[4949]: I0216 12:30:00.440648 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sp6th\" (UniqueName: \"kubernetes.io/projected/d0efd1a1-ece5-469c-9262-137304b697a3-kube-api-access-sp6th\") pod \"collect-profiles-29520750-nl5q9\" (UID: \"d0efd1a1-ece5-469c-9262-137304b697a3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520750-nl5q9" Feb 16 12:30:00 crc kubenswrapper[4949]: I0216 
12:30:00.441064 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d0efd1a1-ece5-469c-9262-137304b697a3-secret-volume\") pod \"collect-profiles-29520750-nl5q9\" (UID: \"d0efd1a1-ece5-469c-9262-137304b697a3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520750-nl5q9" Feb 16 12:30:00 crc kubenswrapper[4949]: I0216 12:30:00.441258 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d0efd1a1-ece5-469c-9262-137304b697a3-config-volume\") pod \"collect-profiles-29520750-nl5q9\" (UID: \"d0efd1a1-ece5-469c-9262-137304b697a3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520750-nl5q9" Feb 16 12:30:00 crc kubenswrapper[4949]: I0216 12:30:00.442620 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d0efd1a1-ece5-469c-9262-137304b697a3-config-volume\") pod \"collect-profiles-29520750-nl5q9\" (UID: \"d0efd1a1-ece5-469c-9262-137304b697a3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520750-nl5q9" Feb 16 12:30:00 crc kubenswrapper[4949]: I0216 12:30:00.448951 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d0efd1a1-ece5-469c-9262-137304b697a3-secret-volume\") pod \"collect-profiles-29520750-nl5q9\" (UID: \"d0efd1a1-ece5-469c-9262-137304b697a3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520750-nl5q9" Feb 16 12:30:00 crc kubenswrapper[4949]: I0216 12:30:00.465018 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sp6th\" (UniqueName: \"kubernetes.io/projected/d0efd1a1-ece5-469c-9262-137304b697a3-kube-api-access-sp6th\") pod \"collect-profiles-29520750-nl5q9\" (UID: \"d0efd1a1-ece5-469c-9262-137304b697a3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520750-nl5q9" Feb 16 12:30:00 crc kubenswrapper[4949]: I0216 12:30:00.485413 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29520750-nl5q9" Feb 16 12:30:00 crc kubenswrapper[4949]: I0216 12:30:00.954624 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29520750-nl5q9"] Feb 16 12:30:01 crc kubenswrapper[4949]: I0216 12:30:01.477610 4949 generic.go:334] "Generic (PLEG): container finished" podID="d0efd1a1-ece5-469c-9262-137304b697a3" containerID="34eead2fb556678db531dab711c93083cc93b882387df2a66916025ca7c98c3a" exitCode=0 Feb 16 12:30:01 crc kubenswrapper[4949]: I0216 12:30:01.477667 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29520750-nl5q9" event={"ID":"d0efd1a1-ece5-469c-9262-137304b697a3","Type":"ContainerDied","Data":"34eead2fb556678db531dab711c93083cc93b882387df2a66916025ca7c98c3a"} Feb 16 12:30:01 crc kubenswrapper[4949]: I0216 12:30:01.477963 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29520750-nl5q9" event={"ID":"d0efd1a1-ece5-469c-9262-137304b697a3","Type":"ContainerStarted","Data":"2cfb2a3c8a162f21b87ee5d2965475fd396938dd442a188ece0c45e7aced373e"} Feb 16 12:30:02 crc kubenswrapper[4949]: I0216 12:30:02.924146 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29520750-nl5q9" Feb 16 12:30:03 crc kubenswrapper[4949]: I0216 12:30:03.117744 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sp6th\" (UniqueName: \"kubernetes.io/projected/d0efd1a1-ece5-469c-9262-137304b697a3-kube-api-access-sp6th\") pod \"d0efd1a1-ece5-469c-9262-137304b697a3\" (UID: \"d0efd1a1-ece5-469c-9262-137304b697a3\") " Feb 16 12:30:03 crc kubenswrapper[4949]: I0216 12:30:03.117950 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d0efd1a1-ece5-469c-9262-137304b697a3-secret-volume\") pod \"d0efd1a1-ece5-469c-9262-137304b697a3\" (UID: \"d0efd1a1-ece5-469c-9262-137304b697a3\") " Feb 16 12:30:03 crc kubenswrapper[4949]: I0216 12:30:03.118134 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d0efd1a1-ece5-469c-9262-137304b697a3-config-volume\") pod \"d0efd1a1-ece5-469c-9262-137304b697a3\" (UID: \"d0efd1a1-ece5-469c-9262-137304b697a3\") " Feb 16 12:30:03 crc kubenswrapper[4949]: I0216 12:30:03.118770 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d0efd1a1-ece5-469c-9262-137304b697a3-config-volume" (OuterVolumeSpecName: "config-volume") pod "d0efd1a1-ece5-469c-9262-137304b697a3" (UID: "d0efd1a1-ece5-469c-9262-137304b697a3"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 12:30:03 crc kubenswrapper[4949]: I0216 12:30:03.118898 4949 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d0efd1a1-ece5-469c-9262-137304b697a3-config-volume\") on node \"crc\" DevicePath \"\"" Feb 16 12:30:03 crc kubenswrapper[4949]: I0216 12:30:03.132402 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0efd1a1-ece5-469c-9262-137304b697a3-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "d0efd1a1-ece5-469c-9262-137304b697a3" (UID: "d0efd1a1-ece5-469c-9262-137304b697a3"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 12:30:03 crc kubenswrapper[4949]: I0216 12:30:03.132416 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d0efd1a1-ece5-469c-9262-137304b697a3-kube-api-access-sp6th" (OuterVolumeSpecName: "kube-api-access-sp6th") pod "d0efd1a1-ece5-469c-9262-137304b697a3" (UID: "d0efd1a1-ece5-469c-9262-137304b697a3"). InnerVolumeSpecName "kube-api-access-sp6th". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 12:30:03 crc kubenswrapper[4949]: I0216 12:30:03.234103 4949 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d0efd1a1-ece5-469c-9262-137304b697a3-secret-volume\") on node \"crc\" DevicePath \"\"" Feb 16 12:30:03 crc kubenswrapper[4949]: I0216 12:30:03.234402 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sp6th\" (UniqueName: \"kubernetes.io/projected/d0efd1a1-ece5-469c-9262-137304b697a3-kube-api-access-sp6th\") on node \"crc\" DevicePath \"\"" Feb 16 12:30:03 crc kubenswrapper[4949]: I0216 12:30:03.497759 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29520750-nl5q9" event={"ID":"d0efd1a1-ece5-469c-9262-137304b697a3","Type":"ContainerDied","Data":"2cfb2a3c8a162f21b87ee5d2965475fd396938dd442a188ece0c45e7aced373e"} Feb 16 12:30:03 crc kubenswrapper[4949]: I0216 12:30:03.497809 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2cfb2a3c8a162f21b87ee5d2965475fd396938dd442a188ece0c45e7aced373e" Feb 16 12:30:03 crc kubenswrapper[4949]: I0216 12:30:03.497890 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29520750-nl5q9" Feb 16 12:30:04 crc kubenswrapper[4949]: I0216 12:30:04.024772 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29520705-qfv6l"] Feb 16 12:30:04 crc kubenswrapper[4949]: I0216 12:30:04.036769 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29520705-qfv6l"] Feb 16 12:30:05 crc kubenswrapper[4949]: I0216 12:30:05.250062 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="08ecf914-cb28-4f47-a401-fdce7e6a7f52" path="/var/lib/kubelet/pods/08ecf914-cb28-4f47-a401-fdce7e6a7f52/volumes" Feb 16 12:30:06 crc kubenswrapper[4949]: E0216 12:30:06.238431 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:30:15 crc kubenswrapper[4949]: E0216 12:30:15.238143 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:30:20 crc kubenswrapper[4949]: E0216 12:30:20.237490 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:30:27 crc kubenswrapper[4949]: E0216 12:30:27.237615 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" 
pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:30:32 crc kubenswrapper[4949]: E0216 12:30:32.238031 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:30:34 crc kubenswrapper[4949]: I0216 12:30:34.550654 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 16 12:30:34 crc kubenswrapper[4949]: I0216 12:30:34.550922 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 16 12:30:39 crc kubenswrapper[4949]: E0216 12:30:39.238974 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:30:45 crc kubenswrapper[4949]: E0216 12:30:45.237021 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:30:52 crc kubenswrapper[4949]: E0216 12:30:52.237592 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:30:58 crc kubenswrapper[4949]: E0216 12:30:58.237789 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:30:59 crc kubenswrapper[4949]: I0216 12:30:59.986409 4949 scope.go:117] "RemoveContainer" containerID="0939a9289f4a91f76e2feb9b260e3b454a3968a4e2d2938d9f28d3f85b0eb57a" Feb 16 12:31:04 crc kubenswrapper[4949]: I0216 12:31:04.550475 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 16 12:31:04 crc kubenswrapper[4949]: I0216 12:31:04.551143 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" 
podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 16 12:31:05 crc kubenswrapper[4949]: E0216 12:31:05.237130 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:31:11 crc kubenswrapper[4949]: E0216 12:31:11.253924 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:31:19 crc kubenswrapper[4949]: E0216 12:31:19.238221 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:31:24 crc kubenswrapper[4949]: E0216 12:31:24.239657 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:31:32 crc kubenswrapper[4949]: E0216 12:31:32.239479 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:31:34 crc kubenswrapper[4949]: I0216 12:31:34.550348 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 16 12:31:34 crc kubenswrapper[4949]: I0216 12:31:34.550961 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 16 12:31:34 crc kubenswrapper[4949]: I0216 12:31:34.551011 4949 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-26lss" Feb 16 12:31:34 crc kubenswrapper[4949]: I0216 12:31:34.551994 4949 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"db5b7272f3abee92a666dd8fd36656846a70280da77aae2fa351ddb1613efa83"} pod="openshift-machine-config-operator/machine-config-daemon-26lss" containerMessage="Container 
machine-config-daemon failed liveness probe, will be restarted" Feb 16 12:31:34 crc kubenswrapper[4949]: I0216 12:31:34.552056 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" containerID="cri-o://db5b7272f3abee92a666dd8fd36656846a70280da77aae2fa351ddb1613efa83" gracePeriod=600 Feb 16 12:31:34 crc kubenswrapper[4949]: E0216 12:31:34.673933 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:31:35 crc kubenswrapper[4949]: I0216 12:31:35.508574 4949 generic.go:334] "Generic (PLEG): container finished" podID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerID="db5b7272f3abee92a666dd8fd36656846a70280da77aae2fa351ddb1613efa83" exitCode=0 Feb 16 12:31:35 crc kubenswrapper[4949]: I0216 12:31:35.508632 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" event={"ID":"39ca5ab7-457c-4404-a3eb-f6acce74843b","Type":"ContainerDied","Data":"db5b7272f3abee92a666dd8fd36656846a70280da77aae2fa351ddb1613efa83"} Feb 16 12:31:35 crc kubenswrapper[4949]: I0216 12:31:35.508684 4949 scope.go:117] "RemoveContainer" containerID="1aa2bd076f2e7ef7c43059413de4be403a813a7fa5386d2acf3c90871bdf13ea" Feb 16 12:31:35 crc kubenswrapper[4949]: I0216 12:31:35.509428 4949 scope.go:117] "RemoveContainer" containerID="db5b7272f3abee92a666dd8fd36656846a70280da77aae2fa351ddb1613efa83" Feb 16 12:31:35 crc kubenswrapper[4949]: E0216 12:31:35.509757 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:31:38 crc kubenswrapper[4949]: E0216 12:31:38.239356 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:31:46 crc kubenswrapper[4949]: E0216 12:31:46.237484 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:31:48 crc kubenswrapper[4949]: I0216 12:31:48.236688 4949 scope.go:117] "RemoveContainer" containerID="db5b7272f3abee92a666dd8fd36656846a70280da77aae2fa351ddb1613efa83" Feb 16 12:31:48 crc kubenswrapper[4949]: E0216 12:31:48.237244 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:31:53 crc kubenswrapper[4949]: E0216 12:31:53.238549 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:31:59 crc kubenswrapper[4949]: I0216 12:31:59.236270 4949 scope.go:117] "RemoveContainer" containerID="db5b7272f3abee92a666dd8fd36656846a70280da77aae2fa351ddb1613efa83" Feb 16 12:31:59 crc kubenswrapper[4949]: E0216 12:31:59.237008 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:32:01 crc kubenswrapper[4949]: I0216 12:32:01.246832 4949 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 16 12:32:01 crc kubenswrapper[4949]: E0216 12:32:01.346925 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 16 12:32:01 crc kubenswrapper[4949]: E0216 12:32:01.346988 4949 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 16 12:32:01 crc kubenswrapper[4949]: E0216 12:32:01.347118 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n674h5dh7bh65bhcch65chc4h547h5d4h5c7h5dch5c8h74hb9h5f4hd8h79h7h59bh559h56bh9bhbch67bh68bh575h5cbh658h5bch7bhcch5d9q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8k7p7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(c69d7379-6f2b-45ae-8972-71e223a337a8): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 16 12:32:01 crc kubenswrapper[4949]: E0216 12:32:01.348402 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:32:07 crc kubenswrapper[4949]: E0216 12:32:07.237463 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:32:11 crc kubenswrapper[4949]: I0216 12:32:11.243502 4949 scope.go:117] "RemoveContainer" containerID="db5b7272f3abee92a666dd8fd36656846a70280da77aae2fa351ddb1613efa83" Feb 16 12:32:11 crc kubenswrapper[4949]: E0216 12:32:11.244592 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:32:13 crc kubenswrapper[4949]: E0216 12:32:13.238369 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:32:19 crc kubenswrapper[4949]: E0216 12:32:19.375032 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 16 12:32:19 crc kubenswrapper[4949]: E0216 12:32:19.375809 4949 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 16 12:32:19 crc kubenswrapper[4949]: E0216 12:32:19.376301 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ksbml,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-5lgds_openstack(a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 16 12:32:19 crc kubenswrapper[4949]: E0216 12:32:19.377490 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:32:24 crc kubenswrapper[4949]: I0216 12:32:24.235469 4949 scope.go:117] "RemoveContainer" containerID="db5b7272f3abee92a666dd8fd36656846a70280da77aae2fa351ddb1613efa83" Feb 16 12:32:24 crc kubenswrapper[4949]: E0216 12:32:24.236281 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:32:25 crc kubenswrapper[4949]: E0216 12:32:25.245432 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:32:32 crc kubenswrapper[4949]: E0216 12:32:32.253944 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:32:36 crc kubenswrapper[4949]: E0216 12:32:36.238115 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:32:39 crc kubenswrapper[4949]: I0216 12:32:39.236616 4949 scope.go:117] "RemoveContainer" containerID="db5b7272f3abee92a666dd8fd36656846a70280da77aae2fa351ddb1613efa83" Feb 16 12:32:39 crc kubenswrapper[4949]: E0216 12:32:39.237788 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:32:44 crc kubenswrapper[4949]: E0216 12:32:44.238160 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:32:50 crc kubenswrapper[4949]: E0216 12:32:50.237662 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:32:51 crc kubenswrapper[4949]: I0216 12:32:51.245114 4949 
scope.go:117] "RemoveContainer" containerID="db5b7272f3abee92a666dd8fd36656846a70280da77aae2fa351ddb1613efa83" Feb 16 12:32:51 crc kubenswrapper[4949]: E0216 12:32:51.245595 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:32:56 crc kubenswrapper[4949]: E0216 12:32:56.238948 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:33:02 crc kubenswrapper[4949]: E0216 12:33:02.237907 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:33:03 crc kubenswrapper[4949]: I0216 12:33:03.236342 4949 scope.go:117] "RemoveContainer" containerID="db5b7272f3abee92a666dd8fd36656846a70280da77aae2fa351ddb1613efa83" Feb 16 12:33:03 crc kubenswrapper[4949]: E0216 12:33:03.236588 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:33:09 crc kubenswrapper[4949]: E0216 12:33:09.238288 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:33:14 crc kubenswrapper[4949]: I0216 12:33:14.235736 4949 scope.go:117] "RemoveContainer" containerID="db5b7272f3abee92a666dd8fd36656846a70280da77aae2fa351ddb1613efa83" Feb 16 12:33:14 crc kubenswrapper[4949]: E0216 12:33:14.236464 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:33:17 crc kubenswrapper[4949]: E0216 12:33:17.238740 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" 
podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:33:23 crc kubenswrapper[4949]: E0216 12:33:23.237642 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:33:25 crc kubenswrapper[4949]: I0216 12:33:25.236570 4949 scope.go:117] "RemoveContainer" containerID="db5b7272f3abee92a666dd8fd36656846a70280da77aae2fa351ddb1613efa83" Feb 16 12:33:25 crc kubenswrapper[4949]: E0216 12:33:25.237534 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:33:31 crc kubenswrapper[4949]: E0216 12:33:31.247837 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:33:37 crc kubenswrapper[4949]: E0216 12:33:37.238465 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:33:39 crc kubenswrapper[4949]: I0216 12:33:39.236801 4949 scope.go:117] "RemoveContainer" containerID="db5b7272f3abee92a666dd8fd36656846a70280da77aae2fa351ddb1613efa83" Feb 16 12:33:39 crc kubenswrapper[4949]: E0216 12:33:39.237582 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:33:45 crc kubenswrapper[4949]: E0216 12:33:45.238167 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:33:51 crc kubenswrapper[4949]: E0216 12:33:51.247022 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:33:52 crc kubenswrapper[4949]: I0216 12:33:52.629002 4949 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-marketplace/redhat-marketplace-gkwf6"] Feb 16 12:33:52 crc kubenswrapper[4949]: E0216 12:33:52.629892 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0efd1a1-ece5-469c-9262-137304b697a3" containerName="collect-profiles" Feb 16 12:33:52 crc kubenswrapper[4949]: I0216 12:33:52.629911 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0efd1a1-ece5-469c-9262-137304b697a3" containerName="collect-profiles" Feb 16 12:33:52 crc kubenswrapper[4949]: I0216 12:33:52.630246 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0efd1a1-ece5-469c-9262-137304b697a3" containerName="collect-profiles" Feb 16 12:33:52 crc kubenswrapper[4949]: I0216 12:33:52.632513 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gkwf6" Feb 16 12:33:52 crc kubenswrapper[4949]: I0216 12:33:52.650875 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-gkwf6"] Feb 16 12:33:52 crc kubenswrapper[4949]: I0216 12:33:52.756732 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e126d644-1ae7-4f07-a34d-5a00398333dc-catalog-content\") pod \"redhat-marketplace-gkwf6\" (UID: \"e126d644-1ae7-4f07-a34d-5a00398333dc\") " pod="openshift-marketplace/redhat-marketplace-gkwf6" Feb 16 12:33:52 crc kubenswrapper[4949]: I0216 12:33:52.757095 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g5nwl\" (UniqueName: \"kubernetes.io/projected/e126d644-1ae7-4f07-a34d-5a00398333dc-kube-api-access-g5nwl\") pod \"redhat-marketplace-gkwf6\" (UID: \"e126d644-1ae7-4f07-a34d-5a00398333dc\") " pod="openshift-marketplace/redhat-marketplace-gkwf6" Feb 16 12:33:52 crc kubenswrapper[4949]: I0216 12:33:52.757325 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e126d644-1ae7-4f07-a34d-5a00398333dc-utilities\") pod \"redhat-marketplace-gkwf6\" (UID: \"e126d644-1ae7-4f07-a34d-5a00398333dc\") " pod="openshift-marketplace/redhat-marketplace-gkwf6" Feb 16 12:33:52 crc kubenswrapper[4949]: I0216 12:33:52.859234 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e126d644-1ae7-4f07-a34d-5a00398333dc-utilities\") pod \"redhat-marketplace-gkwf6\" (UID: \"e126d644-1ae7-4f07-a34d-5a00398333dc\") " pod="openshift-marketplace/redhat-marketplace-gkwf6" Feb 16 12:33:52 crc kubenswrapper[4949]: I0216 12:33:52.859675 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e126d644-1ae7-4f07-a34d-5a00398333dc-catalog-content\") pod \"redhat-marketplace-gkwf6\" (UID: \"e126d644-1ae7-4f07-a34d-5a00398333dc\") " pod="openshift-marketplace/redhat-marketplace-gkwf6" Feb 16 12:33:52 crc kubenswrapper[4949]: I0216 12:33:52.859740 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e126d644-1ae7-4f07-a34d-5a00398333dc-utilities\") pod \"redhat-marketplace-gkwf6\" (UID: \"e126d644-1ae7-4f07-a34d-5a00398333dc\") " pod="openshift-marketplace/redhat-marketplace-gkwf6" Feb 16 12:33:52 crc kubenswrapper[4949]: I0216 12:33:52.859800 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"kube-api-access-g5nwl\" (UniqueName: \"kubernetes.io/projected/e126d644-1ae7-4f07-a34d-5a00398333dc-kube-api-access-g5nwl\") pod \"redhat-marketplace-gkwf6\" (UID: \"e126d644-1ae7-4f07-a34d-5a00398333dc\") " pod="openshift-marketplace/redhat-marketplace-gkwf6" Feb 16 12:33:52 crc kubenswrapper[4949]: I0216 12:33:52.859929 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e126d644-1ae7-4f07-a34d-5a00398333dc-catalog-content\") pod \"redhat-marketplace-gkwf6\" (UID: \"e126d644-1ae7-4f07-a34d-5a00398333dc\") " pod="openshift-marketplace/redhat-marketplace-gkwf6" Feb 16 12:33:52 crc kubenswrapper[4949]: I0216 12:33:52.881826 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g5nwl\" (UniqueName: \"kubernetes.io/projected/e126d644-1ae7-4f07-a34d-5a00398333dc-kube-api-access-g5nwl\") pod \"redhat-marketplace-gkwf6\" (UID: \"e126d644-1ae7-4f07-a34d-5a00398333dc\") " pod="openshift-marketplace/redhat-marketplace-gkwf6" Feb 16 12:33:52 crc kubenswrapper[4949]: I0216 12:33:52.969136 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gkwf6" Feb 16 12:33:53 crc kubenswrapper[4949]: I0216 12:33:53.235633 4949 scope.go:117] "RemoveContainer" containerID="db5b7272f3abee92a666dd8fd36656846a70280da77aae2fa351ddb1613efa83" Feb 16 12:33:53 crc kubenswrapper[4949]: E0216 12:33:53.236115 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:33:53 crc kubenswrapper[4949]: I0216 12:33:53.469509 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-gkwf6"] Feb 16 12:33:54 crc kubenswrapper[4949]: I0216 12:33:54.007564 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gkwf6" event={"ID":"e126d644-1ae7-4f07-a34d-5a00398333dc","Type":"ContainerStarted","Data":"7cacc4d3067e306e24679c2e8fd3c7475a7375036e98aa47570bbbc12d3150e6"} Feb 16 12:33:54 crc kubenswrapper[4949]: I0216 12:33:54.007609 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gkwf6" event={"ID":"e126d644-1ae7-4f07-a34d-5a00398333dc","Type":"ContainerStarted","Data":"1ea89a132062383add36dba66bda975849188ca1e536f4614b0a3818f9b39f5a"} Feb 16 12:33:54 crc kubenswrapper[4949]: I0216 12:33:54.834268 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-k4z62"] Feb 16 12:33:54 crc kubenswrapper[4949]: I0216 12:33:54.842816 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-k4z62" Feb 16 12:33:54 crc kubenswrapper[4949]: I0216 12:33:54.845225 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-k4z62"] Feb 16 12:33:55 crc kubenswrapper[4949]: I0216 12:33:55.020783 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hmdxs\" (UniqueName: \"kubernetes.io/projected/8ee5cd2f-836c-45ed-a242-159e5a9d623b-kube-api-access-hmdxs\") pod \"redhat-operators-k4z62\" (UID: \"8ee5cd2f-836c-45ed-a242-159e5a9d623b\") " pod="openshift-marketplace/redhat-operators-k4z62" Feb 16 12:33:55 crc kubenswrapper[4949]: I0216 12:33:55.020948 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ee5cd2f-836c-45ed-a242-159e5a9d623b-utilities\") pod \"redhat-operators-k4z62\" (UID: \"8ee5cd2f-836c-45ed-a242-159e5a9d623b\") " pod="openshift-marketplace/redhat-operators-k4z62" Feb 16 12:33:55 crc kubenswrapper[4949]: I0216 12:33:55.021044 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ee5cd2f-836c-45ed-a242-159e5a9d623b-catalog-content\") pod \"redhat-operators-k4z62\" (UID: \"8ee5cd2f-836c-45ed-a242-159e5a9d623b\") " pod="openshift-marketplace/redhat-operators-k4z62" Feb 16 12:33:55 crc kubenswrapper[4949]: I0216 12:33:55.022750 4949 generic.go:334] "Generic (PLEG): container finished" podID="e126d644-1ae7-4f07-a34d-5a00398333dc" containerID="7cacc4d3067e306e24679c2e8fd3c7475a7375036e98aa47570bbbc12d3150e6" exitCode=0 Feb 16 12:33:55 crc kubenswrapper[4949]: I0216 12:33:55.022789 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gkwf6" event={"ID":"e126d644-1ae7-4f07-a34d-5a00398333dc","Type":"ContainerDied","Data":"7cacc4d3067e306e24679c2e8fd3c7475a7375036e98aa47570bbbc12d3150e6"} Feb 16 12:33:55 crc kubenswrapper[4949]: I0216 12:33:55.122989 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ee5cd2f-836c-45ed-a242-159e5a9d623b-utilities\") pod \"redhat-operators-k4z62\" (UID: \"8ee5cd2f-836c-45ed-a242-159e5a9d623b\") " pod="openshift-marketplace/redhat-operators-k4z62" Feb 16 12:33:55 crc kubenswrapper[4949]: I0216 12:33:55.123399 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ee5cd2f-836c-45ed-a242-159e5a9d623b-catalog-content\") pod \"redhat-operators-k4z62\" (UID: \"8ee5cd2f-836c-45ed-a242-159e5a9d623b\") " pod="openshift-marketplace/redhat-operators-k4z62" Feb 16 12:33:55 crc kubenswrapper[4949]: I0216 12:33:55.123536 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hmdxs\" (UniqueName: \"kubernetes.io/projected/8ee5cd2f-836c-45ed-a242-159e5a9d623b-kube-api-access-hmdxs\") pod \"redhat-operators-k4z62\" (UID: \"8ee5cd2f-836c-45ed-a242-159e5a9d623b\") " pod="openshift-marketplace/redhat-operators-k4z62" Feb 16 12:33:55 crc kubenswrapper[4949]: I0216 12:33:55.123618 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ee5cd2f-836c-45ed-a242-159e5a9d623b-utilities\") pod \"redhat-operators-k4z62\" (UID: \"8ee5cd2f-836c-45ed-a242-159e5a9d623b\") " 
pod="openshift-marketplace/redhat-operators-k4z62" Feb 16 12:33:55 crc kubenswrapper[4949]: I0216 12:33:55.123838 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ee5cd2f-836c-45ed-a242-159e5a9d623b-catalog-content\") pod \"redhat-operators-k4z62\" (UID: \"8ee5cd2f-836c-45ed-a242-159e5a9d623b\") " pod="openshift-marketplace/redhat-operators-k4z62" Feb 16 12:33:55 crc kubenswrapper[4949]: I0216 12:33:55.148457 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hmdxs\" (UniqueName: \"kubernetes.io/projected/8ee5cd2f-836c-45ed-a242-159e5a9d623b-kube-api-access-hmdxs\") pod \"redhat-operators-k4z62\" (UID: \"8ee5cd2f-836c-45ed-a242-159e5a9d623b\") " pod="openshift-marketplace/redhat-operators-k4z62" Feb 16 12:33:55 crc kubenswrapper[4949]: I0216 12:33:55.171698 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-k4z62" Feb 16 12:33:55 crc kubenswrapper[4949]: W0216 12:33:55.734118 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8ee5cd2f_836c_45ed_a242_159e5a9d623b.slice/crio-6fe850580c515d4e4edc5d14e6821d1fdae1e7b6dc586602bece776b2eb9ab2a WatchSource:0}: Error finding container 6fe850580c515d4e4edc5d14e6821d1fdae1e7b6dc586602bece776b2eb9ab2a: Status 404 returned error can't find the container with id 6fe850580c515d4e4edc5d14e6821d1fdae1e7b6dc586602bece776b2eb9ab2a Feb 16 12:33:55 crc kubenswrapper[4949]: I0216 12:33:55.775301 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-k4z62"] Feb 16 12:33:56 crc kubenswrapper[4949]: I0216 12:33:56.037693 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gkwf6" event={"ID":"e126d644-1ae7-4f07-a34d-5a00398333dc","Type":"ContainerStarted","Data":"caeb027577b6259eacb27cbd6ffda797bc1d8ca50aeb3a939e52606880aa44db"} Feb 16 12:33:56 crc kubenswrapper[4949]: I0216 12:33:56.042957 4949 generic.go:334] "Generic (PLEG): container finished" podID="8ee5cd2f-836c-45ed-a242-159e5a9d623b" containerID="3ffbd84fd844726b65ccfa3ec0626036b0366390d1419de927c7e3e2cfb05bf8" exitCode=0 Feb 16 12:33:56 crc kubenswrapper[4949]: I0216 12:33:56.043014 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-k4z62" event={"ID":"8ee5cd2f-836c-45ed-a242-159e5a9d623b","Type":"ContainerDied","Data":"3ffbd84fd844726b65ccfa3ec0626036b0366390d1419de927c7e3e2cfb05bf8"} Feb 16 12:33:56 crc kubenswrapper[4949]: I0216 12:33:56.043045 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-k4z62" event={"ID":"8ee5cd2f-836c-45ed-a242-159e5a9d623b","Type":"ContainerStarted","Data":"6fe850580c515d4e4edc5d14e6821d1fdae1e7b6dc586602bece776b2eb9ab2a"} Feb 16 12:33:57 crc kubenswrapper[4949]: I0216 12:33:57.055123 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-k4z62" event={"ID":"8ee5cd2f-836c-45ed-a242-159e5a9d623b","Type":"ContainerStarted","Data":"2a0f7ea24ded8a01901f034ed0a450fe17d4bf2a5ee647afb99077061fb418c4"} Feb 16 12:33:58 crc kubenswrapper[4949]: I0216 12:33:58.066585 4949 generic.go:334] "Generic (PLEG): container finished" podID="e126d644-1ae7-4f07-a34d-5a00398333dc" containerID="caeb027577b6259eacb27cbd6ffda797bc1d8ca50aeb3a939e52606880aa44db" exitCode=0 Feb 16 12:33:58 crc 
kubenswrapper[4949]: I0216 12:33:58.066644 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gkwf6" event={"ID":"e126d644-1ae7-4f07-a34d-5a00398333dc","Type":"ContainerDied","Data":"caeb027577b6259eacb27cbd6ffda797bc1d8ca50aeb3a939e52606880aa44db"} Feb 16 12:33:59 crc kubenswrapper[4949]: I0216 12:33:59.090158 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gkwf6" event={"ID":"e126d644-1ae7-4f07-a34d-5a00398333dc","Type":"ContainerStarted","Data":"3af392c2881abdaa7785bb5ae74237ac600c0fbf2daeabf7270443678c6553e7"} Feb 16 12:33:59 crc kubenswrapper[4949]: I0216 12:33:59.110730 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-gkwf6" podStartSLOduration=2.613741898 podStartE2EDuration="7.110712224s" podCreationTimestamp="2026-02-16 12:33:52 +0000 UTC" firstStartedPulling="2026-02-16 12:33:54.010789661 +0000 UTC m=+5223.640123826" lastFinishedPulling="2026-02-16 12:33:58.507759987 +0000 UTC m=+5228.137094152" observedRunningTime="2026-02-16 12:33:59.108572464 +0000 UTC m=+5228.737906679" watchObservedRunningTime="2026-02-16 12:33:59.110712224 +0000 UTC m=+5228.740046389" Feb 16 12:34:00 crc kubenswrapper[4949]: E0216 12:34:00.237410 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:34:01 crc kubenswrapper[4949]: I0216 12:34:01.110530 4949 generic.go:334] "Generic (PLEG): container finished" podID="8ee5cd2f-836c-45ed-a242-159e5a9d623b" containerID="2a0f7ea24ded8a01901f034ed0a450fe17d4bf2a5ee647afb99077061fb418c4" exitCode=0 Feb 16 12:34:01 crc kubenswrapper[4949]: I0216 12:34:01.110592 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-k4z62" event={"ID":"8ee5cd2f-836c-45ed-a242-159e5a9d623b","Type":"ContainerDied","Data":"2a0f7ea24ded8a01901f034ed0a450fe17d4bf2a5ee647afb99077061fb418c4"} Feb 16 12:34:02 crc kubenswrapper[4949]: I0216 12:34:02.128246 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-k4z62" event={"ID":"8ee5cd2f-836c-45ed-a242-159e5a9d623b","Type":"ContainerStarted","Data":"f9a18f31f139b3ec6b49b727f4c808c9d0b71078fecd1e697fe96a0e4be571fa"} Feb 16 12:34:02 crc kubenswrapper[4949]: I0216 12:34:02.157696 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-k4z62" podStartSLOduration=2.679286124 podStartE2EDuration="8.157677431s" podCreationTimestamp="2026-02-16 12:33:54 +0000 UTC" firstStartedPulling="2026-02-16 12:33:56.044992385 +0000 UTC m=+5225.674326560" lastFinishedPulling="2026-02-16 12:34:01.523383702 +0000 UTC m=+5231.152717867" observedRunningTime="2026-02-16 12:34:02.154184832 +0000 UTC m=+5231.783518987" watchObservedRunningTime="2026-02-16 12:34:02.157677431 +0000 UTC m=+5231.787011616" Feb 16 12:34:02 crc kubenswrapper[4949]: E0216 12:34:02.237391 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" 
podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:34:02 crc kubenswrapper[4949]: I0216 12:34:02.969356 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-gkwf6" Feb 16 12:34:02 crc kubenswrapper[4949]: I0216 12:34:02.969923 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-gkwf6" Feb 16 12:34:03 crc kubenswrapper[4949]: I0216 12:34:03.026370 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-gkwf6" Feb 16 12:34:03 crc kubenswrapper[4949]: I0216 12:34:03.190289 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-gkwf6" Feb 16 12:34:05 crc kubenswrapper[4949]: I0216 12:34:05.171792 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-k4z62" Feb 16 12:34:05 crc kubenswrapper[4949]: I0216 12:34:05.172048 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-k4z62" Feb 16 12:34:05 crc kubenswrapper[4949]: I0216 12:34:05.419824 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-gkwf6"] Feb 16 12:34:06 crc kubenswrapper[4949]: I0216 12:34:06.170085 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-gkwf6" podUID="e126d644-1ae7-4f07-a34d-5a00398333dc" containerName="registry-server" containerID="cri-o://3af392c2881abdaa7785bb5ae74237ac600c0fbf2daeabf7270443678c6553e7" gracePeriod=2 Feb 16 12:34:06 crc kubenswrapper[4949]: I0216 12:34:06.225559 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-k4z62" podUID="8ee5cd2f-836c-45ed-a242-159e5a9d623b" containerName="registry-server" probeResult="failure" output=< Feb 16 12:34:06 crc kubenswrapper[4949]: timeout: failed to connect service ":50051" within 1s Feb 16 12:34:06 crc kubenswrapper[4949]: > Feb 16 12:34:06 crc kubenswrapper[4949]: I0216 12:34:06.726428 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gkwf6" Feb 16 12:34:06 crc kubenswrapper[4949]: I0216 12:34:06.872335 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g5nwl\" (UniqueName: \"kubernetes.io/projected/e126d644-1ae7-4f07-a34d-5a00398333dc-kube-api-access-g5nwl\") pod \"e126d644-1ae7-4f07-a34d-5a00398333dc\" (UID: \"e126d644-1ae7-4f07-a34d-5a00398333dc\") " Feb 16 12:34:06 crc kubenswrapper[4949]: I0216 12:34:06.872522 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e126d644-1ae7-4f07-a34d-5a00398333dc-utilities\") pod \"e126d644-1ae7-4f07-a34d-5a00398333dc\" (UID: \"e126d644-1ae7-4f07-a34d-5a00398333dc\") " Feb 16 12:34:06 crc kubenswrapper[4949]: I0216 12:34:06.872559 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e126d644-1ae7-4f07-a34d-5a00398333dc-catalog-content\") pod \"e126d644-1ae7-4f07-a34d-5a00398333dc\" (UID: \"e126d644-1ae7-4f07-a34d-5a00398333dc\") " Feb 16 12:34:06 crc kubenswrapper[4949]: I0216 12:34:06.873726 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e126d644-1ae7-4f07-a34d-5a00398333dc-utilities" (OuterVolumeSpecName: "utilities") pod "e126d644-1ae7-4f07-a34d-5a00398333dc" (UID: "e126d644-1ae7-4f07-a34d-5a00398333dc"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 12:34:06 crc kubenswrapper[4949]: I0216 12:34:06.879488 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e126d644-1ae7-4f07-a34d-5a00398333dc-kube-api-access-g5nwl" (OuterVolumeSpecName: "kube-api-access-g5nwl") pod "e126d644-1ae7-4f07-a34d-5a00398333dc" (UID: "e126d644-1ae7-4f07-a34d-5a00398333dc"). InnerVolumeSpecName "kube-api-access-g5nwl". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 12:34:06 crc kubenswrapper[4949]: I0216 12:34:06.896199 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e126d644-1ae7-4f07-a34d-5a00398333dc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e126d644-1ae7-4f07-a34d-5a00398333dc" (UID: "e126d644-1ae7-4f07-a34d-5a00398333dc"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 12:34:06 crc kubenswrapper[4949]: I0216 12:34:06.976206 4949 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e126d644-1ae7-4f07-a34d-5a00398333dc-utilities\") on node \"crc\" DevicePath \"\"" Feb 16 12:34:06 crc kubenswrapper[4949]: I0216 12:34:06.976247 4949 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e126d644-1ae7-4f07-a34d-5a00398333dc-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 16 12:34:06 crc kubenswrapper[4949]: I0216 12:34:06.976261 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g5nwl\" (UniqueName: \"kubernetes.io/projected/e126d644-1ae7-4f07-a34d-5a00398333dc-kube-api-access-g5nwl\") on node \"crc\" DevicePath \"\"" Feb 16 12:34:07 crc kubenswrapper[4949]: I0216 12:34:07.181202 4949 generic.go:334] "Generic (PLEG): container finished" podID="e126d644-1ae7-4f07-a34d-5a00398333dc" containerID="3af392c2881abdaa7785bb5ae74237ac600c0fbf2daeabf7270443678c6553e7" exitCode=0 Feb 16 12:34:07 crc kubenswrapper[4949]: I0216 12:34:07.181280 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gkwf6" event={"ID":"e126d644-1ae7-4f07-a34d-5a00398333dc","Type":"ContainerDied","Data":"3af392c2881abdaa7785bb5ae74237ac600c0fbf2daeabf7270443678c6553e7"} Feb 16 12:34:07 crc kubenswrapper[4949]: I0216 12:34:07.181515 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gkwf6" event={"ID":"e126d644-1ae7-4f07-a34d-5a00398333dc","Type":"ContainerDied","Data":"1ea89a132062383add36dba66bda975849188ca1e536f4614b0a3818f9b39f5a"} Feb 16 12:34:07 crc kubenswrapper[4949]: I0216 12:34:07.181536 4949 scope.go:117] "RemoveContainer" containerID="3af392c2881abdaa7785bb5ae74237ac600c0fbf2daeabf7270443678c6553e7" Feb 16 12:34:07 crc kubenswrapper[4949]: I0216 12:34:07.181300 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gkwf6" Feb 16 12:34:07 crc kubenswrapper[4949]: I0216 12:34:07.207002 4949 scope.go:117] "RemoveContainer" containerID="caeb027577b6259eacb27cbd6ffda797bc1d8ca50aeb3a939e52606880aa44db" Feb 16 12:34:07 crc kubenswrapper[4949]: I0216 12:34:07.220276 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-gkwf6"] Feb 16 12:34:07 crc kubenswrapper[4949]: I0216 12:34:07.231802 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-gkwf6"] Feb 16 12:34:07 crc kubenswrapper[4949]: I0216 12:34:07.238514 4949 scope.go:117] "RemoveContainer" containerID="7cacc4d3067e306e24679c2e8fd3c7475a7375036e98aa47570bbbc12d3150e6" Feb 16 12:34:07 crc kubenswrapper[4949]: I0216 12:34:07.252953 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e126d644-1ae7-4f07-a34d-5a00398333dc" path="/var/lib/kubelet/pods/e126d644-1ae7-4f07-a34d-5a00398333dc/volumes" Feb 16 12:34:07 crc kubenswrapper[4949]: I0216 12:34:07.290041 4949 scope.go:117] "RemoveContainer" containerID="3af392c2881abdaa7785bb5ae74237ac600c0fbf2daeabf7270443678c6553e7" Feb 16 12:34:07 crc kubenswrapper[4949]: E0216 12:34:07.290515 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3af392c2881abdaa7785bb5ae74237ac600c0fbf2daeabf7270443678c6553e7\": container with ID starting with 3af392c2881abdaa7785bb5ae74237ac600c0fbf2daeabf7270443678c6553e7 not found: ID does not exist" containerID="3af392c2881abdaa7785bb5ae74237ac600c0fbf2daeabf7270443678c6553e7" Feb 16 12:34:07 crc kubenswrapper[4949]: I0216 12:34:07.290557 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3af392c2881abdaa7785bb5ae74237ac600c0fbf2daeabf7270443678c6553e7"} err="failed to get container status \"3af392c2881abdaa7785bb5ae74237ac600c0fbf2daeabf7270443678c6553e7\": rpc error: code = NotFound desc = could not find container \"3af392c2881abdaa7785bb5ae74237ac600c0fbf2daeabf7270443678c6553e7\": container with ID starting with 3af392c2881abdaa7785bb5ae74237ac600c0fbf2daeabf7270443678c6553e7 not found: ID does not exist" Feb 16 12:34:07 crc kubenswrapper[4949]: I0216 12:34:07.290584 4949 scope.go:117] "RemoveContainer" containerID="caeb027577b6259eacb27cbd6ffda797bc1d8ca50aeb3a939e52606880aa44db" Feb 16 12:34:07 crc kubenswrapper[4949]: E0216 12:34:07.290838 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"caeb027577b6259eacb27cbd6ffda797bc1d8ca50aeb3a939e52606880aa44db\": container with ID starting with caeb027577b6259eacb27cbd6ffda797bc1d8ca50aeb3a939e52606880aa44db not found: ID does not exist" containerID="caeb027577b6259eacb27cbd6ffda797bc1d8ca50aeb3a939e52606880aa44db" Feb 16 12:34:07 crc kubenswrapper[4949]: I0216 12:34:07.290874 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"caeb027577b6259eacb27cbd6ffda797bc1d8ca50aeb3a939e52606880aa44db"} err="failed to get container status \"caeb027577b6259eacb27cbd6ffda797bc1d8ca50aeb3a939e52606880aa44db\": rpc error: code = NotFound desc = could not find container \"caeb027577b6259eacb27cbd6ffda797bc1d8ca50aeb3a939e52606880aa44db\": container with ID starting with caeb027577b6259eacb27cbd6ffda797bc1d8ca50aeb3a939e52606880aa44db not found: ID does not exist" Feb 16 12:34:07 crc kubenswrapper[4949]: I0216 
12:34:07.290895 4949 scope.go:117] "RemoveContainer" containerID="7cacc4d3067e306e24679c2e8fd3c7475a7375036e98aa47570bbbc12d3150e6"
Feb 16 12:34:07 crc kubenswrapper[4949]: E0216 12:34:07.291141 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7cacc4d3067e306e24679c2e8fd3c7475a7375036e98aa47570bbbc12d3150e6\": container with ID starting with 7cacc4d3067e306e24679c2e8fd3c7475a7375036e98aa47570bbbc12d3150e6 not found: ID does not exist" containerID="7cacc4d3067e306e24679c2e8fd3c7475a7375036e98aa47570bbbc12d3150e6"
Feb 16 12:34:07 crc kubenswrapper[4949]: I0216 12:34:07.291194 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7cacc4d3067e306e24679c2e8fd3c7475a7375036e98aa47570bbbc12d3150e6"} err="failed to get container status \"7cacc4d3067e306e24679c2e8fd3c7475a7375036e98aa47570bbbc12d3150e6\": rpc error: code = NotFound desc = could not find container \"7cacc4d3067e306e24679c2e8fd3c7475a7375036e98aa47570bbbc12d3150e6\": container with ID starting with 7cacc4d3067e306e24679c2e8fd3c7475a7375036e98aa47570bbbc12d3150e6 not found: ID does not exist"
Feb 16 12:34:08 crc kubenswrapper[4949]: I0216 12:34:08.236195 4949 scope.go:117] "RemoveContainer" containerID="db5b7272f3abee92a666dd8fd36656846a70280da77aae2fa351ddb1613efa83"
Feb 16 12:34:08 crc kubenswrapper[4949]: E0216 12:34:08.236792 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b"
Feb 16 12:34:13 crc kubenswrapper[4949]: E0216 12:34:13.236837 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 12:34:14 crc kubenswrapper[4949]: E0216 12:34:14.237978 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 12:34:15 crc kubenswrapper[4949]: I0216 12:34:15.769761 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-k4z62"
Feb 16 12:34:15 crc kubenswrapper[4949]: I0216 12:34:15.823620 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-k4z62"
Feb 16 12:34:16 crc kubenswrapper[4949]: I0216 12:34:16.012476 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-k4z62"]
Feb 16 12:34:17 crc kubenswrapper[4949]: I0216 12:34:17.280530 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-k4z62" podUID="8ee5cd2f-836c-45ed-a242-159e5a9d623b" containerName="registry-server" containerID="cri-o://f9a18f31f139b3ec6b49b727f4c808c9d0b71078fecd1e697fe96a0e4be571fa" gracePeriod=2
Feb 16 12:34:17 crc kubenswrapper[4949]: I0216 12:34:17.791588 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-k4z62"
Feb 16 12:34:17 crc kubenswrapper[4949]: I0216 12:34:17.849213 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ee5cd2f-836c-45ed-a242-159e5a9d623b-catalog-content\") pod \"8ee5cd2f-836c-45ed-a242-159e5a9d623b\" (UID: \"8ee5cd2f-836c-45ed-a242-159e5a9d623b\") "
Feb 16 12:34:17 crc kubenswrapper[4949]: I0216 12:34:17.849453 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ee5cd2f-836c-45ed-a242-159e5a9d623b-utilities\") pod \"8ee5cd2f-836c-45ed-a242-159e5a9d623b\" (UID: \"8ee5cd2f-836c-45ed-a242-159e5a9d623b\") "
Feb 16 12:34:17 crc kubenswrapper[4949]: I0216 12:34:17.849561 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hmdxs\" (UniqueName: \"kubernetes.io/projected/8ee5cd2f-836c-45ed-a242-159e5a9d623b-kube-api-access-hmdxs\") pod \"8ee5cd2f-836c-45ed-a242-159e5a9d623b\" (UID: \"8ee5cd2f-836c-45ed-a242-159e5a9d623b\") "
Feb 16 12:34:17 crc kubenswrapper[4949]: I0216 12:34:17.850805 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8ee5cd2f-836c-45ed-a242-159e5a9d623b-utilities" (OuterVolumeSpecName: "utilities") pod "8ee5cd2f-836c-45ed-a242-159e5a9d623b" (UID: "8ee5cd2f-836c-45ed-a242-159e5a9d623b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Feb 16 12:34:17 crc kubenswrapper[4949]: I0216 12:34:17.861389 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8ee5cd2f-836c-45ed-a242-159e5a9d623b-kube-api-access-hmdxs" (OuterVolumeSpecName: "kube-api-access-hmdxs") pod "8ee5cd2f-836c-45ed-a242-159e5a9d623b" (UID: "8ee5cd2f-836c-45ed-a242-159e5a9d623b"). InnerVolumeSpecName "kube-api-access-hmdxs". PluginName "kubernetes.io/projected", VolumeGidValue ""
Feb 16 12:34:17 crc kubenswrapper[4949]: I0216 12:34:17.952330 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hmdxs\" (UniqueName: \"kubernetes.io/projected/8ee5cd2f-836c-45ed-a242-159e5a9d623b-kube-api-access-hmdxs\") on node \"crc\" DevicePath \"\""
Feb 16 12:34:17 crc kubenswrapper[4949]: I0216 12:34:17.952360 4949 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ee5cd2f-836c-45ed-a242-159e5a9d623b-utilities\") on node \"crc\" DevicePath \"\""
Feb 16 12:34:17 crc kubenswrapper[4949]: I0216 12:34:17.980061 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8ee5cd2f-836c-45ed-a242-159e5a9d623b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8ee5cd2f-836c-45ed-a242-159e5a9d623b" (UID: "8ee5cd2f-836c-45ed-a242-159e5a9d623b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Feb 16 12:34:18 crc kubenswrapper[4949]: I0216 12:34:18.054541 4949 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ee5cd2f-836c-45ed-a242-159e5a9d623b-catalog-content\") on node \"crc\" DevicePath \"\""
Feb 16 12:34:18 crc kubenswrapper[4949]: I0216 12:34:18.296800 4949 generic.go:334] "Generic (PLEG): container finished" podID="8ee5cd2f-836c-45ed-a242-159e5a9d623b" containerID="f9a18f31f139b3ec6b49b727f4c808c9d0b71078fecd1e697fe96a0e4be571fa" exitCode=0
Feb 16 12:34:18 crc kubenswrapper[4949]: I0216 12:34:18.296890 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-k4z62"
Feb 16 12:34:18 crc kubenswrapper[4949]: I0216 12:34:18.296879 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-k4z62" event={"ID":"8ee5cd2f-836c-45ed-a242-159e5a9d623b","Type":"ContainerDied","Data":"f9a18f31f139b3ec6b49b727f4c808c9d0b71078fecd1e697fe96a0e4be571fa"}
Feb 16 12:34:18 crc kubenswrapper[4949]: I0216 12:34:18.297346 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-k4z62" event={"ID":"8ee5cd2f-836c-45ed-a242-159e5a9d623b","Type":"ContainerDied","Data":"6fe850580c515d4e4edc5d14e6821d1fdae1e7b6dc586602bece776b2eb9ab2a"}
Feb 16 12:34:18 crc kubenswrapper[4949]: I0216 12:34:18.297371 4949 scope.go:117] "RemoveContainer" containerID="f9a18f31f139b3ec6b49b727f4c808c9d0b71078fecd1e697fe96a0e4be571fa"
Feb 16 12:34:18 crc kubenswrapper[4949]: I0216 12:34:18.318984 4949 scope.go:117] "RemoveContainer" containerID="2a0f7ea24ded8a01901f034ed0a450fe17d4bf2a5ee647afb99077061fb418c4"
Feb 16 12:34:18 crc kubenswrapper[4949]: I0216 12:34:18.345251 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-k4z62"]
Feb 16 12:34:18 crc kubenswrapper[4949]: I0216 12:34:18.356265 4949 scope.go:117] "RemoveContainer" containerID="3ffbd84fd844726b65ccfa3ec0626036b0366390d1419de927c7e3e2cfb05bf8"
Feb 16 12:34:18 crc kubenswrapper[4949]: I0216 12:34:18.360559 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-k4z62"]
Feb 16 12:34:18 crc kubenswrapper[4949]: I0216 12:34:18.417771 4949 scope.go:117] "RemoveContainer" containerID="f9a18f31f139b3ec6b49b727f4c808c9d0b71078fecd1e697fe96a0e4be571fa"
Feb 16 12:34:18 crc kubenswrapper[4949]: E0216 12:34:18.418154 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f9a18f31f139b3ec6b49b727f4c808c9d0b71078fecd1e697fe96a0e4be571fa\": container with ID starting with f9a18f31f139b3ec6b49b727f4c808c9d0b71078fecd1e697fe96a0e4be571fa not found: ID does not exist" containerID="f9a18f31f139b3ec6b49b727f4c808c9d0b71078fecd1e697fe96a0e4be571fa"
Feb 16 12:34:18 crc kubenswrapper[4949]: I0216 12:34:18.418212 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f9a18f31f139b3ec6b49b727f4c808c9d0b71078fecd1e697fe96a0e4be571fa"} err="failed to get container status \"f9a18f31f139b3ec6b49b727f4c808c9d0b71078fecd1e697fe96a0e4be571fa\": rpc error: code = NotFound desc = could not find container \"f9a18f31f139b3ec6b49b727f4c808c9d0b71078fecd1e697fe96a0e4be571fa\": container with ID starting with f9a18f31f139b3ec6b49b727f4c808c9d0b71078fecd1e697fe96a0e4be571fa not found: ID does not exist"
Feb 16 12:34:18 crc kubenswrapper[4949]: I0216 12:34:18.418248 4949 scope.go:117] "RemoveContainer" containerID="2a0f7ea24ded8a01901f034ed0a450fe17d4bf2a5ee647afb99077061fb418c4"
Feb 16 12:34:18 crc kubenswrapper[4949]: E0216 12:34:18.418744 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2a0f7ea24ded8a01901f034ed0a450fe17d4bf2a5ee647afb99077061fb418c4\": container with ID starting with 2a0f7ea24ded8a01901f034ed0a450fe17d4bf2a5ee647afb99077061fb418c4 not found: ID does not exist" containerID="2a0f7ea24ded8a01901f034ed0a450fe17d4bf2a5ee647afb99077061fb418c4"
Feb 16 12:34:18 crc kubenswrapper[4949]: I0216 12:34:18.418774 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2a0f7ea24ded8a01901f034ed0a450fe17d4bf2a5ee647afb99077061fb418c4"} err="failed to get container status \"2a0f7ea24ded8a01901f034ed0a450fe17d4bf2a5ee647afb99077061fb418c4\": rpc error: code = NotFound desc = could not find container \"2a0f7ea24ded8a01901f034ed0a450fe17d4bf2a5ee647afb99077061fb418c4\": container with ID starting with 2a0f7ea24ded8a01901f034ed0a450fe17d4bf2a5ee647afb99077061fb418c4 not found: ID does not exist"
Feb 16 12:34:18 crc kubenswrapper[4949]: I0216 12:34:18.418794 4949 scope.go:117] "RemoveContainer" containerID="3ffbd84fd844726b65ccfa3ec0626036b0366390d1419de927c7e3e2cfb05bf8"
Feb 16 12:34:18 crc kubenswrapper[4949]: E0216 12:34:18.420809 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3ffbd84fd844726b65ccfa3ec0626036b0366390d1419de927c7e3e2cfb05bf8\": container with ID starting with 3ffbd84fd844726b65ccfa3ec0626036b0366390d1419de927c7e3e2cfb05bf8 not found: ID does not exist" containerID="3ffbd84fd844726b65ccfa3ec0626036b0366390d1419de927c7e3e2cfb05bf8"
Feb 16 12:34:18 crc kubenswrapper[4949]: I0216 12:34:18.420853 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3ffbd84fd844726b65ccfa3ec0626036b0366390d1419de927c7e3e2cfb05bf8"} err="failed to get container status \"3ffbd84fd844726b65ccfa3ec0626036b0366390d1419de927c7e3e2cfb05bf8\": rpc error: code = NotFound desc = could not find container \"3ffbd84fd844726b65ccfa3ec0626036b0366390d1419de927c7e3e2cfb05bf8\": container with ID starting with 3ffbd84fd844726b65ccfa3ec0626036b0366390d1419de927c7e3e2cfb05bf8 not found: ID does not exist"
Feb 16 12:34:19 crc kubenswrapper[4949]: I0216 12:34:19.248940 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8ee5cd2f-836c-45ed-a242-159e5a9d623b" path="/var/lib/kubelet/pods/8ee5cd2f-836c-45ed-a242-159e5a9d623b/volumes"
Feb 16 12:34:22 crc kubenswrapper[4949]: I0216 12:34:22.236070 4949 scope.go:117] "RemoveContainer" containerID="db5b7272f3abee92a666dd8fd36656846a70280da77aae2fa351ddb1613efa83"
Feb 16 12:34:22 crc kubenswrapper[4949]: E0216 12:34:22.237058 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b"
Feb 16 12:34:28 crc kubenswrapper[4949]: E0216 12:34:28.237158 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 12:34:28 crc kubenswrapper[4949]: E0216 12:34:28.237400 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 12:34:32 crc kubenswrapper[4949]: I0216 12:34:32.469640 4949 generic.go:334] "Generic (PLEG): container finished" podID="d5a39a2b-f779-4ac6-86ee-db48d7305088" containerID="483ac8dba97ce234365beb879f8461c9932e1dac687678a15972da37387f3143" exitCode=2
Feb 16 12:34:32 crc kubenswrapper[4949]: I0216 12:34:32.469721 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-c7g6c" event={"ID":"d5a39a2b-f779-4ac6-86ee-db48d7305088","Type":"ContainerDied","Data":"483ac8dba97ce234365beb879f8461c9932e1dac687678a15972da37387f3143"}
Feb 16 12:34:33 crc kubenswrapper[4949]: I0216 12:34:33.963323 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-c7g6c"
Feb 16 12:34:34 crc kubenswrapper[4949]: I0216 12:34:34.019935 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f2hlw\" (UniqueName: \"kubernetes.io/projected/d5a39a2b-f779-4ac6-86ee-db48d7305088-kube-api-access-f2hlw\") pod \"d5a39a2b-f779-4ac6-86ee-db48d7305088\" (UID: \"d5a39a2b-f779-4ac6-86ee-db48d7305088\") "
Feb 16 12:34:34 crc kubenswrapper[4949]: I0216 12:34:34.020068 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d5a39a2b-f779-4ac6-86ee-db48d7305088-ssh-key-openstack-edpm-ipam\") pod \"d5a39a2b-f779-4ac6-86ee-db48d7305088\" (UID: \"d5a39a2b-f779-4ac6-86ee-db48d7305088\") "
Feb 16 12:34:34 crc kubenswrapper[4949]: I0216 12:34:34.020219 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d5a39a2b-f779-4ac6-86ee-db48d7305088-inventory\") pod \"d5a39a2b-f779-4ac6-86ee-db48d7305088\" (UID: \"d5a39a2b-f779-4ac6-86ee-db48d7305088\") "
Feb 16 12:34:34 crc kubenswrapper[4949]: I0216 12:34:34.029266 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d5a39a2b-f779-4ac6-86ee-db48d7305088-kube-api-access-f2hlw" (OuterVolumeSpecName: "kube-api-access-f2hlw") pod "d5a39a2b-f779-4ac6-86ee-db48d7305088" (UID: "d5a39a2b-f779-4ac6-86ee-db48d7305088"). InnerVolumeSpecName "kube-api-access-f2hlw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Feb 16 12:34:34 crc kubenswrapper[4949]: I0216 12:34:34.067252 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d5a39a2b-f779-4ac6-86ee-db48d7305088-inventory" (OuterVolumeSpecName: "inventory") pod "d5a39a2b-f779-4ac6-86ee-db48d7305088" (UID: "d5a39a2b-f779-4ac6-86ee-db48d7305088"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Feb 16 12:34:34 crc kubenswrapper[4949]: I0216 12:34:34.075918 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d5a39a2b-f779-4ac6-86ee-db48d7305088-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "d5a39a2b-f779-4ac6-86ee-db48d7305088" (UID: "d5a39a2b-f779-4ac6-86ee-db48d7305088"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue ""
Feb 16 12:34:34 crc kubenswrapper[4949]: I0216 12:34:34.123230 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f2hlw\" (UniqueName: \"kubernetes.io/projected/d5a39a2b-f779-4ac6-86ee-db48d7305088-kube-api-access-f2hlw\") on node \"crc\" DevicePath \"\""
Feb 16 12:34:34 crc kubenswrapper[4949]: I0216 12:34:34.123268 4949 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d5a39a2b-f779-4ac6-86ee-db48d7305088-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\""
Feb 16 12:34:34 crc kubenswrapper[4949]: I0216 12:34:34.123283 4949 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d5a39a2b-f779-4ac6-86ee-db48d7305088-inventory\") on node \"crc\" DevicePath \"\""
Feb 16 12:34:34 crc kubenswrapper[4949]: I0216 12:34:34.491319 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-c7g6c" event={"ID":"d5a39a2b-f779-4ac6-86ee-db48d7305088","Type":"ContainerDied","Data":"82424e392df9086a288400c3c5860185bd5dd3383a57df5195fa16bf7d08faca"}
Feb 16 12:34:34 crc kubenswrapper[4949]: I0216 12:34:34.491685 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="82424e392df9086a288400c3c5860185bd5dd3383a57df5195fa16bf7d08faca"
Feb 16 12:34:34 crc kubenswrapper[4949]: I0216 12:34:34.491448 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-c7g6c"
Feb 16 12:34:35 crc kubenswrapper[4949]: I0216 12:34:35.235677 4949 scope.go:117] "RemoveContainer" containerID="db5b7272f3abee92a666dd8fd36656846a70280da77aae2fa351ddb1613efa83"
Feb 16 12:34:35 crc kubenswrapper[4949]: E0216 12:34:35.236483 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b"
Feb 16 12:34:39 crc kubenswrapper[4949]: E0216 12:34:39.255518 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 12:34:42 crc kubenswrapper[4949]: E0216 12:34:42.238597 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 12:34:44 crc kubenswrapper[4949]: I0216 12:34:44.985131 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-dv8h7"]
Feb 16 12:34:44 crc kubenswrapper[4949]: E0216 12:34:44.986075 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ee5cd2f-836c-45ed-a242-159e5a9d623b" containerName="extract-utilities"
Feb 16 12:34:44 crc kubenswrapper[4949]: I0216 12:34:44.986088 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ee5cd2f-836c-45ed-a242-159e5a9d623b" containerName="extract-utilities"
Feb 16 12:34:44 crc kubenswrapper[4949]: E0216 12:34:44.986119 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e126d644-1ae7-4f07-a34d-5a00398333dc" containerName="extract-content"
Feb 16 12:34:44 crc kubenswrapper[4949]: I0216 12:34:44.986124 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="e126d644-1ae7-4f07-a34d-5a00398333dc" containerName="extract-content"
Feb 16 12:34:44 crc kubenswrapper[4949]: E0216 12:34:44.986144 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e126d644-1ae7-4f07-a34d-5a00398333dc" containerName="extract-utilities"
Feb 16 12:34:44 crc kubenswrapper[4949]: I0216 12:34:44.986150 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="e126d644-1ae7-4f07-a34d-5a00398333dc" containerName="extract-utilities"
Feb 16 12:34:44 crc kubenswrapper[4949]: E0216 12:34:44.986164 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ee5cd2f-836c-45ed-a242-159e5a9d623b" containerName="registry-server"
Feb 16 12:34:44 crc kubenswrapper[4949]: I0216 12:34:44.986185 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ee5cd2f-836c-45ed-a242-159e5a9d623b" containerName="registry-server"
Feb 16 12:34:44 crc kubenswrapper[4949]: E0216 12:34:44.986198 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5a39a2b-f779-4ac6-86ee-db48d7305088" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Feb 16 12:34:44 crc kubenswrapper[4949]: I0216 12:34:44.986205 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5a39a2b-f779-4ac6-86ee-db48d7305088" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Feb 16 12:34:44 crc kubenswrapper[4949]: E0216 12:34:44.986219 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ee5cd2f-836c-45ed-a242-159e5a9d623b" containerName="extract-content"
Feb 16 12:34:44 crc kubenswrapper[4949]: I0216 12:34:44.986225 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ee5cd2f-836c-45ed-a242-159e5a9d623b" containerName="extract-content"
Feb 16 12:34:44 crc kubenswrapper[4949]: E0216 12:34:44.986246 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e126d644-1ae7-4f07-a34d-5a00398333dc" containerName="registry-server"
Feb 16 12:34:44 crc kubenswrapper[4949]: I0216 12:34:44.986253 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="e126d644-1ae7-4f07-a34d-5a00398333dc" containerName="registry-server"
Feb 16 12:34:44 crc kubenswrapper[4949]: I0216 12:34:44.986469 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5a39a2b-f779-4ac6-86ee-db48d7305088" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Feb 16 12:34:44 crc kubenswrapper[4949]: I0216 12:34:44.986482 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="8ee5cd2f-836c-45ed-a242-159e5a9d623b" containerName="registry-server"
Feb 16 12:34:44 crc kubenswrapper[4949]: I0216 12:34:44.986495 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="e126d644-1ae7-4f07-a34d-5a00398333dc" containerName="registry-server"
Feb 16 12:34:45 crc kubenswrapper[4949]: I0216 12:34:44.988253 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-dv8h7"
Feb 16 12:34:45 crc kubenswrapper[4949]: I0216 12:34:44.996050 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-dv8h7"]
Feb 16 12:34:45 crc kubenswrapper[4949]: I0216 12:34:45.108849 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vdgs4\" (UniqueName: \"kubernetes.io/projected/eece3a55-6be4-4125-ac31-1d98d0971e93-kube-api-access-vdgs4\") pod \"community-operators-dv8h7\" (UID: \"eece3a55-6be4-4125-ac31-1d98d0971e93\") " pod="openshift-marketplace/community-operators-dv8h7"
Feb 16 12:34:45 crc kubenswrapper[4949]: I0216 12:34:45.108977 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eece3a55-6be4-4125-ac31-1d98d0971e93-catalog-content\") pod \"community-operators-dv8h7\" (UID: \"eece3a55-6be4-4125-ac31-1d98d0971e93\") " pod="openshift-marketplace/community-operators-dv8h7"
Feb 16 12:34:45 crc kubenswrapper[4949]: I0216 12:34:45.109266 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eece3a55-6be4-4125-ac31-1d98d0971e93-utilities\") pod \"community-operators-dv8h7\" (UID: \"eece3a55-6be4-4125-ac31-1d98d0971e93\") " pod="openshift-marketplace/community-operators-dv8h7"
Feb 16 12:34:45 crc kubenswrapper[4949]: I0216 12:34:45.211189 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eece3a55-6be4-4125-ac31-1d98d0971e93-utilities\") pod \"community-operators-dv8h7\" (UID: \"eece3a55-6be4-4125-ac31-1d98d0971e93\") " pod="openshift-marketplace/community-operators-dv8h7"
Feb 16 12:34:45 crc kubenswrapper[4949]: I0216 12:34:45.211372 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vdgs4\" (UniqueName: \"kubernetes.io/projected/eece3a55-6be4-4125-ac31-1d98d0971e93-kube-api-access-vdgs4\") pod \"community-operators-dv8h7\" (UID: \"eece3a55-6be4-4125-ac31-1d98d0971e93\") " pod="openshift-marketplace/community-operators-dv8h7"
Feb 16 12:34:45 crc kubenswrapper[4949]: I0216 12:34:45.211696 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eece3a55-6be4-4125-ac31-1d98d0971e93-catalog-content\") pod \"community-operators-dv8h7\" (UID: \"eece3a55-6be4-4125-ac31-1d98d0971e93\") " pod="openshift-marketplace/community-operators-dv8h7"
Feb 16 12:34:45 crc kubenswrapper[4949]: I0216 12:34:45.212253 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eece3a55-6be4-4125-ac31-1d98d0971e93-utilities\") pod \"community-operators-dv8h7\" (UID: \"eece3a55-6be4-4125-ac31-1d98d0971e93\") " pod="openshift-marketplace/community-operators-dv8h7"
Feb 16 12:34:45 crc kubenswrapper[4949]: I0216 12:34:45.212448 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eece3a55-6be4-4125-ac31-1d98d0971e93-catalog-content\") pod \"community-operators-dv8h7\" (UID: \"eece3a55-6be4-4125-ac31-1d98d0971e93\") " pod="openshift-marketplace/community-operators-dv8h7"
Feb 16 12:34:45 crc kubenswrapper[4949]: I0216 12:34:45.236205 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vdgs4\" (UniqueName: \"kubernetes.io/projected/eece3a55-6be4-4125-ac31-1d98d0971e93-kube-api-access-vdgs4\") pod \"community-operators-dv8h7\" (UID: \"eece3a55-6be4-4125-ac31-1d98d0971e93\") " pod="openshift-marketplace/community-operators-dv8h7"
Feb 16 12:34:45 crc kubenswrapper[4949]: I0216 12:34:45.332625 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-dv8h7"
Feb 16 12:34:45 crc kubenswrapper[4949]: I0216 12:34:45.854930 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-dv8h7"]
Feb 16 12:34:46 crc kubenswrapper[4949]: I0216 12:34:46.636066 4949 generic.go:334] "Generic (PLEG): container finished" podID="eece3a55-6be4-4125-ac31-1d98d0971e93" containerID="04a86b661731d11bcb96821ed67102df11d2d231632a7f326f98122a51524f85" exitCode=0
Feb 16 12:34:46 crc kubenswrapper[4949]: I0216 12:34:46.636593 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dv8h7" event={"ID":"eece3a55-6be4-4125-ac31-1d98d0971e93","Type":"ContainerDied","Data":"04a86b661731d11bcb96821ed67102df11d2d231632a7f326f98122a51524f85"}
Feb 16 12:34:46 crc kubenswrapper[4949]: I0216 12:34:46.636632 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dv8h7" event={"ID":"eece3a55-6be4-4125-ac31-1d98d0971e93","Type":"ContainerStarted","Data":"ee0d827553255349b4bc183e37725d4151376cc706d6b2386b785e48468dc695"}
Feb 16 12:34:48 crc kubenswrapper[4949]: I0216 12:34:48.234929 4949 scope.go:117] "RemoveContainer" containerID="db5b7272f3abee92a666dd8fd36656846a70280da77aae2fa351ddb1613efa83"
Feb 16 12:34:48 crc kubenswrapper[4949]: E0216 12:34:48.235594 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b"
Feb 16 12:34:48 crc kubenswrapper[4949]: I0216 12:34:48.820694 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dv8h7" event={"ID":"eece3a55-6be4-4125-ac31-1d98d0971e93","Type":"ContainerStarted","Data":"ef6d51d799e827ae1ae8cf82df6c401b57745b2f5166b0d9594d25dc112bb1ed"}
Feb 16 12:34:49 crc kubenswrapper[4949]: I0216 12:34:49.842562 4949 generic.go:334] "Generic (PLEG): container finished" podID="eece3a55-6be4-4125-ac31-1d98d0971e93" containerID="ef6d51d799e827ae1ae8cf82df6c401b57745b2f5166b0d9594d25dc112bb1ed" exitCode=0
Feb 16 12:34:49 crc kubenswrapper[4949]: I0216 12:34:49.842903 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dv8h7" event={"ID":"eece3a55-6be4-4125-ac31-1d98d0971e93","Type":"ContainerDied","Data":"ef6d51d799e827ae1ae8cf82df6c401b57745b2f5166b0d9594d25dc112bb1ed"}
Feb 16 12:34:50 crc kubenswrapper[4949]: I0216 12:34:50.857283 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dv8h7" event={"ID":"eece3a55-6be4-4125-ac31-1d98d0971e93","Type":"ContainerStarted","Data":"7cb19f4edb72150b0b03e97d8f56bd1a8b99cab1f268657fb5d8f0e37d292110"}
Feb 16 12:34:50 crc kubenswrapper[4949]: I0216 12:34:50.873018 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-dv8h7" podStartSLOduration=3.227937432 podStartE2EDuration="6.872988238s" podCreationTimestamp="2026-02-16 12:34:44 +0000 UTC" firstStartedPulling="2026-02-16 12:34:46.639736093 +0000 UTC m=+5276.269070268" lastFinishedPulling="2026-02-16 12:34:50.284786899 +0000 UTC m=+5279.914121074" observedRunningTime="2026-02-16 12:34:50.872020171 +0000 UTC m=+5280.501354336" watchObservedRunningTime="2026-02-16 12:34:50.872988238 +0000 UTC m=+5280.502322403"
Feb 16 12:34:52 crc kubenswrapper[4949]: E0216 12:34:52.238473 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 12:34:54 crc kubenswrapper[4949]: E0216 12:34:54.239923 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 12:34:55 crc kubenswrapper[4949]: I0216 12:34:55.334048 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-dv8h7"
Feb 16 12:34:55 crc kubenswrapper[4949]: I0216 12:34:55.334206 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-dv8h7"
Feb 16 12:34:55 crc kubenswrapper[4949]: I0216 12:34:55.383831 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-dv8h7"
Feb 16 12:34:55 crc kubenswrapper[4949]: I0216 12:34:55.952468 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-dv8h7"
Feb 16 12:34:56 crc kubenswrapper[4949]: I0216 12:34:56.003371 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-dv8h7"]
Feb 16 12:34:57 crc kubenswrapper[4949]: I0216 12:34:57.933741 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-dv8h7" podUID="eece3a55-6be4-4125-ac31-1d98d0971e93" containerName="registry-server" containerID="cri-o://7cb19f4edb72150b0b03e97d8f56bd1a8b99cab1f268657fb5d8f0e37d292110" gracePeriod=2
Feb 16 12:34:58 crc kubenswrapper[4949]: I0216 12:34:58.946435 4949 generic.go:334] "Generic (PLEG): container finished" podID="eece3a55-6be4-4125-ac31-1d98d0971e93" containerID="7cb19f4edb72150b0b03e97d8f56bd1a8b99cab1f268657fb5d8f0e37d292110" exitCode=0
Feb 16 12:34:58 crc kubenswrapper[4949]: I0216 12:34:58.946769 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dv8h7" event={"ID":"eece3a55-6be4-4125-ac31-1d98d0971e93","Type":"ContainerDied","Data":"7cb19f4edb72150b0b03e97d8f56bd1a8b99cab1f268657fb5d8f0e37d292110"}
Feb 16 12:34:58 crc kubenswrapper[4949]: I0216 12:34:58.946793 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dv8h7" event={"ID":"eece3a55-6be4-4125-ac31-1d98d0971e93","Type":"ContainerDied","Data":"ee0d827553255349b4bc183e37725d4151376cc706d6b2386b785e48468dc695"}
Feb 16 12:34:58 crc kubenswrapper[4949]: I0216 12:34:58.946803 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ee0d827553255349b4bc183e37725d4151376cc706d6b2386b785e48468dc695"
Feb 16 12:34:58 crc kubenswrapper[4949]: I0216 12:34:58.947452 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-dv8h7"
Feb 16 12:34:58 crc kubenswrapper[4949]: I0216 12:34:58.959478 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eece3a55-6be4-4125-ac31-1d98d0971e93-utilities\") pod \"eece3a55-6be4-4125-ac31-1d98d0971e93\" (UID: \"eece3a55-6be4-4125-ac31-1d98d0971e93\") "
Feb 16 12:34:58 crc kubenswrapper[4949]: I0216 12:34:58.959629 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eece3a55-6be4-4125-ac31-1d98d0971e93-catalog-content\") pod \"eece3a55-6be4-4125-ac31-1d98d0971e93\" (UID: \"eece3a55-6be4-4125-ac31-1d98d0971e93\") "
Feb 16 12:34:58 crc kubenswrapper[4949]: I0216 12:34:58.959742 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vdgs4\" (UniqueName: \"kubernetes.io/projected/eece3a55-6be4-4125-ac31-1d98d0971e93-kube-api-access-vdgs4\") pod \"eece3a55-6be4-4125-ac31-1d98d0971e93\" (UID: \"eece3a55-6be4-4125-ac31-1d98d0971e93\") "
Feb 16 12:34:58 crc kubenswrapper[4949]: I0216 12:34:58.960368 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eece3a55-6be4-4125-ac31-1d98d0971e93-utilities" (OuterVolumeSpecName: "utilities") pod "eece3a55-6be4-4125-ac31-1d98d0971e93" (UID: "eece3a55-6be4-4125-ac31-1d98d0971e93"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Feb 16 12:34:58 crc kubenswrapper[4949]: I0216 12:34:58.966113 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eece3a55-6be4-4125-ac31-1d98d0971e93-kube-api-access-vdgs4" (OuterVolumeSpecName: "kube-api-access-vdgs4") pod "eece3a55-6be4-4125-ac31-1d98d0971e93" (UID: "eece3a55-6be4-4125-ac31-1d98d0971e93"). InnerVolumeSpecName "kube-api-access-vdgs4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Feb 16 12:34:59 crc kubenswrapper[4949]: I0216 12:34:59.043019 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eece3a55-6be4-4125-ac31-1d98d0971e93-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "eece3a55-6be4-4125-ac31-1d98d0971e93" (UID: "eece3a55-6be4-4125-ac31-1d98d0971e93"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Feb 16 12:34:59 crc kubenswrapper[4949]: I0216 12:34:59.063018 4949 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eece3a55-6be4-4125-ac31-1d98d0971e93-catalog-content\") on node \"crc\" DevicePath \"\""
Feb 16 12:34:59 crc kubenswrapper[4949]: I0216 12:34:59.063061 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vdgs4\" (UniqueName: \"kubernetes.io/projected/eece3a55-6be4-4125-ac31-1d98d0971e93-kube-api-access-vdgs4\") on node \"crc\" DevicePath \"\""
Feb 16 12:34:59 crc kubenswrapper[4949]: I0216 12:34:59.063078 4949 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eece3a55-6be4-4125-ac31-1d98d0971e93-utilities\") on node \"crc\" DevicePath \"\""
Feb 16 12:34:59 crc kubenswrapper[4949]: I0216 12:34:59.236094 4949 scope.go:117] "RemoveContainer" containerID="db5b7272f3abee92a666dd8fd36656846a70280da77aae2fa351ddb1613efa83"
Feb 16 12:34:59 crc kubenswrapper[4949]: E0216 12:34:59.236638 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b"
Feb 16 12:34:59 crc kubenswrapper[4949]: I0216 12:34:59.955984 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-dv8h7"
Feb 16 12:34:59 crc kubenswrapper[4949]: I0216 12:34:59.995075 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-dv8h7"]
Feb 16 12:35:00 crc kubenswrapper[4949]: I0216 12:35:00.009750 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-dv8h7"]
Feb 16 12:35:01 crc kubenswrapper[4949]: I0216 12:35:01.258405 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eece3a55-6be4-4125-ac31-1d98d0971e93" path="/var/lib/kubelet/pods/eece3a55-6be4-4125-ac31-1d98d0971e93/volumes"
Feb 16 12:35:04 crc kubenswrapper[4949]: E0216 12:35:04.238723 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 12:35:05 crc kubenswrapper[4949]: E0216 12:35:05.238826 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 12:35:13 crc kubenswrapper[4949]: I0216 12:35:13.236279 4949 scope.go:117] "RemoveContainer" containerID="db5b7272f3abee92a666dd8fd36656846a70280da77aae2fa351ddb1613efa83"
Feb 16 12:35:13 crc kubenswrapper[4949]: E0216 12:35:13.237363 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b"
Feb 16 12:35:17 crc kubenswrapper[4949]: E0216 12:35:17.238981 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 12:35:18 crc kubenswrapper[4949]: E0216 12:35:18.238915 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 12:35:25 crc kubenswrapper[4949]: I0216 12:35:25.235047 4949 scope.go:117] "RemoveContainer" containerID="db5b7272f3abee92a666dd8fd36656846a70280da77aae2fa351ddb1613efa83"
Feb 16 12:35:25 crc kubenswrapper[4949]: E0216 12:35:25.235889 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b"
Feb 16 12:35:28 crc kubenswrapper[4949]: E0216 12:35:28.237955 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 12:35:30 crc kubenswrapper[4949]: E0216 12:35:30.238868 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 12:35:38 crc kubenswrapper[4949]: I0216 12:35:38.235966 4949 scope.go:117] "RemoveContainer" containerID="db5b7272f3abee92a666dd8fd36656846a70280da77aae2fa351ddb1613efa83"
Feb 16 12:35:38 crc kubenswrapper[4949]: E0216 12:35:38.236978 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b"
Feb 16 12:35:39 crc kubenswrapper[4949]: E0216 12:35:39.237833 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 12:35:41 crc kubenswrapper[4949]: E0216 12:35:41.247891 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 12:35:51 crc kubenswrapper[4949]: I0216 12:35:51.251392 4949 scope.go:117] "RemoveContainer" containerID="db5b7272f3abee92a666dd8fd36656846a70280da77aae2fa351ddb1613efa83"
Feb 16 12:35:51 crc kubenswrapper[4949]: E0216 12:35:51.252250 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b"
Feb 16 12:35:52 crc kubenswrapper[4949]: E0216 12:35:52.239323 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 12:35:54 crc kubenswrapper[4949]: E0216 12:35:54.237016 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 12:35:59 crc kubenswrapper[4949]: I0216 12:35:59.628610 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-zc6qh/must-gather-8dsjg"]
Feb 16 12:35:59 crc kubenswrapper[4949]: E0216 12:35:59.629348 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eece3a55-6be4-4125-ac31-1d98d0971e93" containerName="registry-server"
Feb 16 12:35:59 crc kubenswrapper[4949]: I0216 12:35:59.629362 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="eece3a55-6be4-4125-ac31-1d98d0971e93" containerName="registry-server"
Feb 16 12:35:59 crc kubenswrapper[4949]: E0216 12:35:59.629378 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eece3a55-6be4-4125-ac31-1d98d0971e93" containerName="extract-utilities"
Feb 16 12:35:59 crc kubenswrapper[4949]: I0216 12:35:59.629385 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="eece3a55-6be4-4125-ac31-1d98d0971e93" containerName="extract-utilities"
Feb 16 12:35:59 crc kubenswrapper[4949]: E0216 12:35:59.629422 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eece3a55-6be4-4125-ac31-1d98d0971e93" containerName="extract-content"
Feb 16 12:35:59 crc kubenswrapper[4949]: I0216 12:35:59.629428 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="eece3a55-6be4-4125-ac31-1d98d0971e93" containerName="extract-content"
Feb 16 12:35:59 crc kubenswrapper[4949]: I0216 12:35:59.629654 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="eece3a55-6be4-4125-ac31-1d98d0971e93" containerName="registry-server"
Feb 16 12:35:59 crc kubenswrapper[4949]: I0216 12:35:59.631594 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-zc6qh/must-gather-8dsjg"
Feb 16 12:35:59 crc kubenswrapper[4949]: I0216 12:35:59.634835 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-zc6qh"/"kube-root-ca.crt"
Feb 16 12:35:59 crc kubenswrapper[4949]: I0216 12:35:59.634953 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-zc6qh"/"openshift-service-ca.crt"
Feb 16 12:35:59 crc kubenswrapper[4949]: I0216 12:35:59.635242 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-zc6qh"/"default-dockercfg-g7qkd"
Feb 16 12:35:59 crc kubenswrapper[4949]: I0216 12:35:59.654507 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-zc6qh/must-gather-8dsjg"]
Feb 16 12:35:59 crc kubenswrapper[4949]: I0216 12:35:59.700152 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j7vzn\" (UniqueName: \"kubernetes.io/projected/462f2b95-35b2-4890-bba9-cb9c59356955-kube-api-access-j7vzn\") pod \"must-gather-8dsjg\" (UID: \"462f2b95-35b2-4890-bba9-cb9c59356955\") " pod="openshift-must-gather-zc6qh/must-gather-8dsjg"
Feb 16 12:35:59 crc kubenswrapper[4949]: I0216 12:35:59.700279 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/462f2b95-35b2-4890-bba9-cb9c59356955-must-gather-output\") pod \"must-gather-8dsjg\" (UID: \"462f2b95-35b2-4890-bba9-cb9c59356955\") " pod="openshift-must-gather-zc6qh/must-gather-8dsjg"
Feb 16 12:35:59 crc kubenswrapper[4949]: I0216 12:35:59.802644 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j7vzn\" (UniqueName: \"kubernetes.io/projected/462f2b95-35b2-4890-bba9-cb9c59356955-kube-api-access-j7vzn\") pod \"must-gather-8dsjg\" (UID: \"462f2b95-35b2-4890-bba9-cb9c59356955\") " pod="openshift-must-gather-zc6qh/must-gather-8dsjg"
Feb 16 12:35:59 crc kubenswrapper[4949]: I0216 12:35:59.802826 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/462f2b95-35b2-4890-bba9-cb9c59356955-must-gather-output\") pod \"must-gather-8dsjg\" (UID: \"462f2b95-35b2-4890-bba9-cb9c59356955\") " pod="openshift-must-gather-zc6qh/must-gather-8dsjg"
Feb 16 12:35:59 crc kubenswrapper[4949]: I0216 12:35:59.803544 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/462f2b95-35b2-4890-bba9-cb9c59356955-must-gather-output\") pod \"must-gather-8dsjg\" (UID: \"462f2b95-35b2-4890-bba9-cb9c59356955\") " pod="openshift-must-gather-zc6qh/must-gather-8dsjg"
Feb 16 12:35:59 crc kubenswrapper[4949]: I0216 12:35:59.823586 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j7vzn\" (UniqueName: \"kubernetes.io/projected/462f2b95-35b2-4890-bba9-cb9c59356955-kube-api-access-j7vzn\") pod \"must-gather-8dsjg\" (UID: \"462f2b95-35b2-4890-bba9-cb9c59356955\") " pod="openshift-must-gather-zc6qh/must-gather-8dsjg"
Feb 16 12:35:59 crc kubenswrapper[4949]: I0216 12:35:59.950533 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-zc6qh/must-gather-8dsjg"
Feb 16 12:36:00 crc kubenswrapper[4949]: I0216 12:36:00.440786 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-zc6qh/must-gather-8dsjg"]
Feb 16 12:36:00 crc kubenswrapper[4949]: I0216 12:36:00.635217 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-zc6qh/must-gather-8dsjg" event={"ID":"462f2b95-35b2-4890-bba9-cb9c59356955","Type":"ContainerStarted","Data":"0dfb74b33d7899d8282a7bf49c8ce032535f29eca543e129435848ed2841d986"}
Feb 16 12:36:05 crc kubenswrapper[4949]: I0216 12:36:05.235982 4949 scope.go:117] "RemoveContainer" containerID="db5b7272f3abee92a666dd8fd36656846a70280da77aae2fa351ddb1613efa83"
Feb 16 12:36:05 crc kubenswrapper[4949]: E0216 12:36:05.237063 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b"
Feb 16 12:36:05 crc kubenswrapper[4949]: E0216 12:36:05.238370 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 12:36:08 crc kubenswrapper[4949]: I0216 12:36:08.757919 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-zc6qh/must-gather-8dsjg" event={"ID":"462f2b95-35b2-4890-bba9-cb9c59356955","Type":"ContainerStarted","Data":"fd99e9c6e278a9fd6340338e3ff7b63eab2644a15e120e9b1c567673860be40e"}
Feb 16 12:36:08 crc kubenswrapper[4949]: I0216 12:36:08.758426 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-zc6qh/must-gather-8dsjg" event={"ID":"462f2b95-35b2-4890-bba9-cb9c59356955","Type":"ContainerStarted","Data":"cc58e9f03385bc53a2f6e28aa1626183148747be37adb9c0fa215d67b1cce3c8"}
Feb 16 12:36:08 crc kubenswrapper[4949]: I0216 12:36:08.773422 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-zc6qh/must-gather-8dsjg" podStartSLOduration=2.515362108 podStartE2EDuration="9.773403709s" podCreationTimestamp="2026-02-16 12:35:59 +0000 UTC" firstStartedPulling="2026-02-16 12:36:00.443826784 +0000 UTC m=+5350.073160959" lastFinishedPulling="2026-02-16 12:36:07.701868395 +0000 UTC m=+5357.331202560" observedRunningTime="2026-02-16 12:36:08.770316671 +0000 UTC m=+5358.399650846" watchObservedRunningTime="2026-02-16 12:36:08.773403709 +0000 UTC m=+5358.402737874"
Feb 16 12:36:09 crc kubenswrapper[4949]: E0216 12:36:09.240074 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 12:36:14 crc kubenswrapper[4949]: I0216 12:36:14.183588 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-zc6qh/crc-debug-9b4hl"]
Feb 16 12:36:14 crc kubenswrapper[4949]: I0216 12:36:14.185649 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-zc6qh/crc-debug-9b4hl"
Feb 16 12:36:14 crc kubenswrapper[4949]: I0216 12:36:14.294482 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-96s4j\" (UniqueName: \"kubernetes.io/projected/dc755a4e-5674-4795-a433-3681e0f04b70-kube-api-access-96s4j\") pod \"crc-debug-9b4hl\" (UID: \"dc755a4e-5674-4795-a433-3681e0f04b70\") " pod="openshift-must-gather-zc6qh/crc-debug-9b4hl"
Feb 16 12:36:14 crc kubenswrapper[4949]: I0216 12:36:14.294595 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/dc755a4e-5674-4795-a433-3681e0f04b70-host\") pod \"crc-debug-9b4hl\" (UID: \"dc755a4e-5674-4795-a433-3681e0f04b70\") " pod="openshift-must-gather-zc6qh/crc-debug-9b4hl"
Feb 16 12:36:14 crc kubenswrapper[4949]: I0216 12:36:14.397245 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-96s4j\" (UniqueName: \"kubernetes.io/projected/dc755a4e-5674-4795-a433-3681e0f04b70-kube-api-access-96s4j\") pod \"crc-debug-9b4hl\" (UID: \"dc755a4e-5674-4795-a433-3681e0f04b70\") " pod="openshift-must-gather-zc6qh/crc-debug-9b4hl"
Feb 16 12:36:14 crc kubenswrapper[4949]: I0216 12:36:14.397349 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/dc755a4e-5674-4795-a433-3681e0f04b70-host\") pod \"crc-debug-9b4hl\" (UID: \"dc755a4e-5674-4795-a433-3681e0f04b70\") " pod="openshift-must-gather-zc6qh/crc-debug-9b4hl"
Feb 16 12:36:14 crc kubenswrapper[4949]: I0216 12:36:14.398522 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/dc755a4e-5674-4795-a433-3681e0f04b70-host\") pod \"crc-debug-9b4hl\" (UID: \"dc755a4e-5674-4795-a433-3681e0f04b70\") " pod="openshift-must-gather-zc6qh/crc-debug-9b4hl"
Feb 16 12:36:14 crc kubenswrapper[4949]: I0216 12:36:14.418263 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-96s4j\" (UniqueName: \"kubernetes.io/projected/dc755a4e-5674-4795-a433-3681e0f04b70-kube-api-access-96s4j\") pod \"crc-debug-9b4hl\" (UID: \"dc755a4e-5674-4795-a433-3681e0f04b70\") " pod="openshift-must-gather-zc6qh/crc-debug-9b4hl"
Feb 16 12:36:14 crc kubenswrapper[4949]: I0216 12:36:14.506634 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-zc6qh/crc-debug-9b4hl"
Feb 16 12:36:14 crc kubenswrapper[4949]: I0216 12:36:14.825402 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-zc6qh/crc-debug-9b4hl" event={"ID":"dc755a4e-5674-4795-a433-3681e0f04b70","Type":"ContainerStarted","Data":"14fa35338ba86647d5b67329f8fbd4cddce357ed6e3bab6942637b7232ad0b23"}
Feb 16 12:36:17 crc kubenswrapper[4949]: I0216 12:36:17.235932 4949 scope.go:117] "RemoveContainer" containerID="db5b7272f3abee92a666dd8fd36656846a70280da77aae2fa351ddb1613efa83"
Feb 16 12:36:17 crc kubenswrapper[4949]: E0216 12:36:17.238536 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b"
Feb 16 12:36:18 crc kubenswrapper[4949]: E0216 12:36:18.237994 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 12:36:23 crc kubenswrapper[4949]: E0216 12:36:23.237282 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 12:36:26 crc kubenswrapper[4949]: I0216 12:36:26.948461 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-zc6qh/crc-debug-9b4hl" event={"ID":"dc755a4e-5674-4795-a433-3681e0f04b70","Type":"ContainerStarted","Data":"e0ecfdb596d8e16dcb99e956083569640e1fdd56f0ec1d4b6f4dc939d13dbb6b"}
Feb 16 12:36:26 crc kubenswrapper[4949]: I0216 12:36:26.987720 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-zc6qh/crc-debug-9b4hl" podStartSLOduration=0.943294233 podStartE2EDuration="12.987701115s" podCreationTimestamp="2026-02-16 12:36:14 +0000 UTC" firstStartedPulling="2026-02-16 12:36:14.54993717 +0000 UTC m=+5364.179271335" lastFinishedPulling="2026-02-16 12:36:26.594344052 +0000 UTC m=+5376.223678217" observedRunningTime="2026-02-16 12:36:26.975513052 +0000 UTC m=+5376.604847217" watchObservedRunningTime="2026-02-16 12:36:26.987701115 +0000 UTC m=+5376.617035280"
Feb 16 12:36:30 crc kubenswrapper[4949]: I0216 12:36:30.756353 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-5ntc2"]
Feb 16 12:36:30 crc kubenswrapper[4949]: I0216 12:36:30.763886 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5ntc2"
Feb 16 12:36:30 crc kubenswrapper[4949]: I0216 12:36:30.800357 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5ntc2"]
Feb 16 12:36:30 crc kubenswrapper[4949]: I0216 12:36:30.900789 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mqmhr\" (UniqueName: \"kubernetes.io/projected/30805ff6-6e05-4234-a24d-739d7b59a5ee-kube-api-access-mqmhr\") pod \"certified-operators-5ntc2\" (UID: \"30805ff6-6e05-4234-a24d-739d7b59a5ee\") " pod="openshift-marketplace/certified-operators-5ntc2"
Feb 16 12:36:30 crc kubenswrapper[4949]: I0216 12:36:30.900909 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/30805ff6-6e05-4234-a24d-739d7b59a5ee-utilities\") pod \"certified-operators-5ntc2\" (UID: \"30805ff6-6e05-4234-a24d-739d7b59a5ee\") " pod="openshift-marketplace/certified-operators-5ntc2"
Feb 16 12:36:30 crc kubenswrapper[4949]: I0216 12:36:30.900985 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/30805ff6-6e05-4234-a24d-739d7b59a5ee-catalog-content\") pod \"certified-operators-5ntc2\" (UID: \"30805ff6-6e05-4234-a24d-739d7b59a5ee\") " pod="openshift-marketplace/certified-operators-5ntc2"
Feb 16 12:36:31 crc kubenswrapper[4949]: I0216 12:36:31.003493 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mqmhr\" (UniqueName: \"kubernetes.io/projected/30805ff6-6e05-4234-a24d-739d7b59a5ee-kube-api-access-mqmhr\") pod \"certified-operators-5ntc2\" (UID: \"30805ff6-6e05-4234-a24d-739d7b59a5ee\") " pod="openshift-marketplace/certified-operators-5ntc2"
Feb 16 12:36:31 crc kubenswrapper[4949]: I0216 12:36:31.003625 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/30805ff6-6e05-4234-a24d-739d7b59a5ee-utilities\") pod \"certified-operators-5ntc2\" (UID: \"30805ff6-6e05-4234-a24d-739d7b59a5ee\") " pod="openshift-marketplace/certified-operators-5ntc2"
Feb 16 12:36:31 crc kubenswrapper[4949]: I0216 12:36:31.003708 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/30805ff6-6e05-4234-a24d-739d7b59a5ee-catalog-content\") pod \"certified-operators-5ntc2\" (UID: \"30805ff6-6e05-4234-a24d-739d7b59a5ee\") " pod="openshift-marketplace/certified-operators-5ntc2"
Feb 16 12:36:31 crc kubenswrapper[4949]: I0216 12:36:31.004499 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/30805ff6-6e05-4234-a24d-739d7b59a5ee-utilities\") pod \"certified-operators-5ntc2\" (UID: \"30805ff6-6e05-4234-a24d-739d7b59a5ee\") " pod="openshift-marketplace/certified-operators-5ntc2"
Feb 16 12:36:31 crc kubenswrapper[4949]: I0216 12:36:31.004796 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/30805ff6-6e05-4234-a24d-739d7b59a5ee-catalog-content\") pod \"certified-operators-5ntc2\" (UID: \"30805ff6-6e05-4234-a24d-739d7b59a5ee\") " pod="openshift-marketplace/certified-operators-5ntc2"
Feb 16 12:36:31 crc kubenswrapper[4949]: I0216 12:36:31.027741 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mqmhr\" (UniqueName: \"kubernetes.io/projected/30805ff6-6e05-4234-a24d-739d7b59a5ee-kube-api-access-mqmhr\") pod \"certified-operators-5ntc2\" (UID: \"30805ff6-6e05-4234-a24d-739d7b59a5ee\") " pod="openshift-marketplace/certified-operators-5ntc2"
Feb 16 12:36:31 crc kubenswrapper[4949]: I0216 12:36:31.096517 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5ntc2"
Feb 16 12:36:31 crc kubenswrapper[4949]: I0216 12:36:31.704900 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5ntc2"]
Feb 16 12:36:32 crc kubenswrapper[4949]: I0216 12:36:32.236605 4949 scope.go:117] "RemoveContainer" containerID="db5b7272f3abee92a666dd8fd36656846a70280da77aae2fa351ddb1613efa83"
Feb 16 12:36:32 crc kubenswrapper[4949]: E0216 12:36:32.237143 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b"
Feb 16 12:36:33 crc kubenswrapper[4949]: I0216 12:36:33.030026 4949 generic.go:334] "Generic (PLEG): container finished" podID="30805ff6-6e05-4234-a24d-739d7b59a5ee" containerID="a885e1b189c781b3f6b1b3e7d49bf337c4656f379196c1e19e5f2672866853ab" exitCode=0
Feb 16 12:36:33 crc kubenswrapper[4949]: I0216 12:36:33.030120 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5ntc2" event={"ID":"30805ff6-6e05-4234-a24d-739d7b59a5ee","Type":"ContainerDied","Data":"a885e1b189c781b3f6b1b3e7d49bf337c4656f379196c1e19e5f2672866853ab"}
Feb 16 12:36:33 crc kubenswrapper[4949]: I0216 12:36:33.030338 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5ntc2" event={"ID":"30805ff6-6e05-4234-a24d-739d7b59a5ee","Type":"ContainerStarted","Data":"accd12bd37230107cdc6d8cea992b7635b4d45015162ed29473640deedbbb8ee"}
Feb 16 12:36:33 crc kubenswrapper[4949]: E0216 12:36:33.242815 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 12:36:34 crc kubenswrapper[4949]: E0216 12:36:34.277109 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"
Feb 16 12:36:36 crc kubenswrapper[4949]: I0216 12:36:36.061680 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5ntc2" event={"ID":"30805ff6-6e05-4234-a24d-739d7b59a5ee","Type":"ContainerStarted","Data":"953f7a0c2f631114f968e6690e90a872e3637d2efe170a0d0809bd56b5ca94a4"}
Feb 16 12:36:38 crc kubenswrapper[4949]: I0216 12:36:38.083491 4949 generic.go:334] "Generic (PLEG): container finished" podID="30805ff6-6e05-4234-a24d-739d7b59a5ee" containerID="953f7a0c2f631114f968e6690e90a872e3637d2efe170a0d0809bd56b5ca94a4" exitCode=0
Feb 16 12:36:38 crc kubenswrapper[4949]: I0216 12:36:38.083565 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5ntc2" event={"ID":"30805ff6-6e05-4234-a24d-739d7b59a5ee","Type":"ContainerDied","Data":"953f7a0c2f631114f968e6690e90a872e3637d2efe170a0d0809bd56b5ca94a4"}
Feb 16 12:36:39 crc kubenswrapper[4949]: I0216 12:36:39.097884 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5ntc2" event={"ID":"30805ff6-6e05-4234-a24d-739d7b59a5ee","Type":"ContainerStarted","Data":"4977d1df46377eea0789cd9b59e90636fb9d8457c4854f6aea5641718759427f"}
Feb 16 12:36:39 crc kubenswrapper[4949]: I0216 12:36:39.124860 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-5ntc2" podStartSLOduration=5.576344313 podStartE2EDuration="9.124842899s" podCreationTimestamp="2026-02-16 12:36:30 +0000 UTC" firstStartedPulling="2026-02-16 12:36:34.970530245 +0000 UTC m=+5384.599864410" lastFinishedPulling="2026-02-16 12:36:38.519028831 +0000 UTC m=+5388.148362996" observedRunningTime="2026-02-16 12:36:39.117267546 +0000 UTC m=+5388.746601731" watchObservedRunningTime="2026-02-16 12:36:39.124842899 +0000 UTC m=+5388.754177064"
Feb 16 12:36:41 crc kubenswrapper[4949]: I0216 12:36:41.097014 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-5ntc2"
Feb 16 12:36:41 crc kubenswrapper[4949]: I0216 12:36:41.097559 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-5ntc2"
Feb 16 12:36:41 crc kubenswrapper[4949]: I0216 12:36:41.153501 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-5ntc2"
Feb 16 12:36:44 crc kubenswrapper[4949]: I0216 12:36:44.148430 4949 generic.go:334] "Generic (PLEG): container finished" podID="dc755a4e-5674-4795-a433-3681e0f04b70" containerID="e0ecfdb596d8e16dcb99e956083569640e1fdd56f0ec1d4b6f4dc939d13dbb6b" exitCode=0
Feb 16 12:36:44 crc kubenswrapper[4949]: I0216 12:36:44.148587 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-zc6qh/crc-debug-9b4hl" event={"ID":"dc755a4e-5674-4795-a433-3681e0f04b70","Type":"ContainerDied","Data":"e0ecfdb596d8e16dcb99e956083569640e1fdd56f0ec1d4b6f4dc939d13dbb6b"}
Feb 16 12:36:44 crc kubenswrapper[4949]: I0216 12:36:44.236279 4949 scope.go:117] "RemoveContainer" containerID="db5b7272f3abee92a666dd8fd36656846a70280da77aae2fa351ddb1613efa83"
Feb 16 12:36:44 crc kubenswrapper[4949]: E0216 12:36:44.238421 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8"
Feb 16 12:36:45 crc kubenswrapper[4949]: I0216 12:36:45.161390 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" event={"ID":"39ca5ab7-457c-4404-a3eb-f6acce74843b","Type":"ContainerStarted","Data":"d10535a2c33e7c98837e7814d2faabb2c1191b7db54bf9280f66b8dd9f5db5f9"}
Feb 16 12:36:45 crc kubenswrapper[4949]: I0216 12:36:45.321917 4949 util.go:48] "No ready sandbox for pod can be
found. Need to start a new one" pod="openshift-must-gather-zc6qh/crc-debug-9b4hl" Feb 16 12:36:45 crc kubenswrapper[4949]: I0216 12:36:45.359093 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-zc6qh/crc-debug-9b4hl"] Feb 16 12:36:45 crc kubenswrapper[4949]: I0216 12:36:45.375362 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-zc6qh/crc-debug-9b4hl"] Feb 16 12:36:45 crc kubenswrapper[4949]: I0216 12:36:45.451405 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-96s4j\" (UniqueName: \"kubernetes.io/projected/dc755a4e-5674-4795-a433-3681e0f04b70-kube-api-access-96s4j\") pod \"dc755a4e-5674-4795-a433-3681e0f04b70\" (UID: \"dc755a4e-5674-4795-a433-3681e0f04b70\") " Feb 16 12:36:45 crc kubenswrapper[4949]: I0216 12:36:45.451708 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/dc755a4e-5674-4795-a433-3681e0f04b70-host\") pod \"dc755a4e-5674-4795-a433-3681e0f04b70\" (UID: \"dc755a4e-5674-4795-a433-3681e0f04b70\") " Feb 16 12:36:45 crc kubenswrapper[4949]: I0216 12:36:45.452207 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/dc755a4e-5674-4795-a433-3681e0f04b70-host" (OuterVolumeSpecName: "host") pod "dc755a4e-5674-4795-a433-3681e0f04b70" (UID: "dc755a4e-5674-4795-a433-3681e0f04b70"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 16 12:36:45 crc kubenswrapper[4949]: I0216 12:36:45.452340 4949 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/dc755a4e-5674-4795-a433-3681e0f04b70-host\") on node \"crc\" DevicePath \"\"" Feb 16 12:36:45 crc kubenswrapper[4949]: I0216 12:36:45.459541 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc755a4e-5674-4795-a433-3681e0f04b70-kube-api-access-96s4j" (OuterVolumeSpecName: "kube-api-access-96s4j") pod "dc755a4e-5674-4795-a433-3681e0f04b70" (UID: "dc755a4e-5674-4795-a433-3681e0f04b70"). InnerVolumeSpecName "kube-api-access-96s4j". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 12:36:45 crc kubenswrapper[4949]: I0216 12:36:45.555025 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-96s4j\" (UniqueName: \"kubernetes.io/projected/dc755a4e-5674-4795-a433-3681e0f04b70-kube-api-access-96s4j\") on node \"crc\" DevicePath \"\"" Feb 16 12:36:46 crc kubenswrapper[4949]: I0216 12:36:46.174532 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="14fa35338ba86647d5b67329f8fbd4cddce357ed6e3bab6942637b7232ad0b23" Feb 16 12:36:46 crc kubenswrapper[4949]: I0216 12:36:46.174589 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-zc6qh/crc-debug-9b4hl" Feb 16 12:36:46 crc kubenswrapper[4949]: I0216 12:36:46.742069 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-zc6qh/crc-debug-f4dp4"] Feb 16 12:36:46 crc kubenswrapper[4949]: E0216 12:36:46.742812 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc755a4e-5674-4795-a433-3681e0f04b70" containerName="container-00" Feb 16 12:36:46 crc kubenswrapper[4949]: I0216 12:36:46.742829 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc755a4e-5674-4795-a433-3681e0f04b70" containerName="container-00" Feb 16 12:36:46 crc kubenswrapper[4949]: I0216 12:36:46.743043 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc755a4e-5674-4795-a433-3681e0f04b70" containerName="container-00" Feb 16 12:36:46 crc kubenswrapper[4949]: I0216 12:36:46.745720 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-zc6qh/crc-debug-f4dp4" Feb 16 12:36:46 crc kubenswrapper[4949]: I0216 12:36:46.896808 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b70ab82e-eada-4f3e-9bb7-7c139df21770-host\") pod \"crc-debug-f4dp4\" (UID: \"b70ab82e-eada-4f3e-9bb7-7c139df21770\") " pod="openshift-must-gather-zc6qh/crc-debug-f4dp4" Feb 16 12:36:46 crc kubenswrapper[4949]: I0216 12:36:46.897150 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v8bzc\" (UniqueName: \"kubernetes.io/projected/b70ab82e-eada-4f3e-9bb7-7c139df21770-kube-api-access-v8bzc\") pod \"crc-debug-f4dp4\" (UID: \"b70ab82e-eada-4f3e-9bb7-7c139df21770\") " pod="openshift-must-gather-zc6qh/crc-debug-f4dp4" Feb 16 12:36:47 crc kubenswrapper[4949]: I0216 12:36:47.000163 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b70ab82e-eada-4f3e-9bb7-7c139df21770-host\") pod \"crc-debug-f4dp4\" (UID: \"b70ab82e-eada-4f3e-9bb7-7c139df21770\") " pod="openshift-must-gather-zc6qh/crc-debug-f4dp4" Feb 16 12:36:47 crc kubenswrapper[4949]: I0216 12:36:47.000269 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b70ab82e-eada-4f3e-9bb7-7c139df21770-host\") pod \"crc-debug-f4dp4\" (UID: \"b70ab82e-eada-4f3e-9bb7-7c139df21770\") " pod="openshift-must-gather-zc6qh/crc-debug-f4dp4" Feb 16 12:36:47 crc kubenswrapper[4949]: I0216 12:36:47.000573 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v8bzc\" (UniqueName: \"kubernetes.io/projected/b70ab82e-eada-4f3e-9bb7-7c139df21770-kube-api-access-v8bzc\") pod \"crc-debug-f4dp4\" (UID: \"b70ab82e-eada-4f3e-9bb7-7c139df21770\") " pod="openshift-must-gather-zc6qh/crc-debug-f4dp4" Feb 16 12:36:47 crc kubenswrapper[4949]: I0216 12:36:47.020912 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v8bzc\" (UniqueName: \"kubernetes.io/projected/b70ab82e-eada-4f3e-9bb7-7c139df21770-kube-api-access-v8bzc\") pod \"crc-debug-f4dp4\" (UID: \"b70ab82e-eada-4f3e-9bb7-7c139df21770\") " pod="openshift-must-gather-zc6qh/crc-debug-f4dp4" Feb 16 12:36:47 crc kubenswrapper[4949]: I0216 12:36:47.065085 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-zc6qh/crc-debug-f4dp4" Feb 16 12:36:47 crc kubenswrapper[4949]: W0216 12:36:47.100398 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb70ab82e_eada_4f3e_9bb7_7c139df21770.slice/crio-2823631e6ddc8f58a22039bb44ad540b4ad9f9d3e75aba7833921d4b615aaa62 WatchSource:0}: Error finding container 2823631e6ddc8f58a22039bb44ad540b4ad9f9d3e75aba7833921d4b615aaa62: Status 404 returned error can't find the container with id 2823631e6ddc8f58a22039bb44ad540b4ad9f9d3e75aba7833921d4b615aaa62 Feb 16 12:36:47 crc kubenswrapper[4949]: I0216 12:36:47.192528 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-zc6qh/crc-debug-f4dp4" event={"ID":"b70ab82e-eada-4f3e-9bb7-7c139df21770","Type":"ContainerStarted","Data":"2823631e6ddc8f58a22039bb44ad540b4ad9f9d3e75aba7833921d4b615aaa62"} Feb 16 12:36:47 crc kubenswrapper[4949]: I0216 12:36:47.257461 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dc755a4e-5674-4795-a433-3681e0f04b70" path="/var/lib/kubelet/pods/dc755a4e-5674-4795-a433-3681e0f04b70/volumes" Feb 16 12:36:48 crc kubenswrapper[4949]: I0216 12:36:48.208108 4949 generic.go:334] "Generic (PLEG): container finished" podID="b70ab82e-eada-4f3e-9bb7-7c139df21770" containerID="dc45e553627ce0d7e2b8b5547929c2a67d85bf0c1b02c793beb7cdc026089b77" exitCode=1 Feb 16 12:36:48 crc kubenswrapper[4949]: I0216 12:36:48.208597 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-zc6qh/crc-debug-f4dp4" event={"ID":"b70ab82e-eada-4f3e-9bb7-7c139df21770","Type":"ContainerDied","Data":"dc45e553627ce0d7e2b8b5547929c2a67d85bf0c1b02c793beb7cdc026089b77"} Feb 16 12:36:48 crc kubenswrapper[4949]: I0216 12:36:48.264678 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-zc6qh/crc-debug-f4dp4"] Feb 16 12:36:48 crc kubenswrapper[4949]: I0216 12:36:48.274831 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-zc6qh/crc-debug-f4dp4"] Feb 16 12:36:49 crc kubenswrapper[4949]: E0216 12:36:49.240854 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:36:49 crc kubenswrapper[4949]: I0216 12:36:49.375139 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-zc6qh/crc-debug-f4dp4" Feb 16 12:36:49 crc kubenswrapper[4949]: I0216 12:36:49.561367 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b70ab82e-eada-4f3e-9bb7-7c139df21770-host\") pod \"b70ab82e-eada-4f3e-9bb7-7c139df21770\" (UID: \"b70ab82e-eada-4f3e-9bb7-7c139df21770\") " Feb 16 12:36:49 crc kubenswrapper[4949]: I0216 12:36:49.561492 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b70ab82e-eada-4f3e-9bb7-7c139df21770-host" (OuterVolumeSpecName: "host") pod "b70ab82e-eada-4f3e-9bb7-7c139df21770" (UID: "b70ab82e-eada-4f3e-9bb7-7c139df21770"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 16 12:36:49 crc kubenswrapper[4949]: I0216 12:36:49.561506 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v8bzc\" (UniqueName: \"kubernetes.io/projected/b70ab82e-eada-4f3e-9bb7-7c139df21770-kube-api-access-v8bzc\") pod \"b70ab82e-eada-4f3e-9bb7-7c139df21770\" (UID: \"b70ab82e-eada-4f3e-9bb7-7c139df21770\") " Feb 16 12:36:49 crc kubenswrapper[4949]: I0216 12:36:49.562121 4949 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b70ab82e-eada-4f3e-9bb7-7c139df21770-host\") on node \"crc\" DevicePath \"\"" Feb 16 12:36:49 crc kubenswrapper[4949]: I0216 12:36:49.573392 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b70ab82e-eada-4f3e-9bb7-7c139df21770-kube-api-access-v8bzc" (OuterVolumeSpecName: "kube-api-access-v8bzc") pod "b70ab82e-eada-4f3e-9bb7-7c139df21770" (UID: "b70ab82e-eada-4f3e-9bb7-7c139df21770"). InnerVolumeSpecName "kube-api-access-v8bzc". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 12:36:49 crc kubenswrapper[4949]: I0216 12:36:49.664832 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v8bzc\" (UniqueName: \"kubernetes.io/projected/b70ab82e-eada-4f3e-9bb7-7c139df21770-kube-api-access-v8bzc\") on node \"crc\" DevicePath \"\"" Feb 16 12:36:50 crc kubenswrapper[4949]: I0216 12:36:50.233132 4949 scope.go:117] "RemoveContainer" containerID="dc45e553627ce0d7e2b8b5547929c2a67d85bf0c1b02c793beb7cdc026089b77" Feb 16 12:36:50 crc kubenswrapper[4949]: I0216 12:36:50.233651 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-zc6qh/crc-debug-f4dp4" Feb 16 12:36:51 crc kubenswrapper[4949]: I0216 12:36:51.159642 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-5ntc2" Feb 16 12:36:51 crc kubenswrapper[4949]: I0216 12:36:51.253634 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b70ab82e-eada-4f3e-9bb7-7c139df21770" path="/var/lib/kubelet/pods/b70ab82e-eada-4f3e-9bb7-7c139df21770/volumes" Feb 16 12:36:52 crc kubenswrapper[4949]: I0216 12:36:52.794472 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5ntc2"] Feb 16 12:36:52 crc kubenswrapper[4949]: I0216 12:36:52.795264 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-5ntc2" podUID="30805ff6-6e05-4234-a24d-739d7b59a5ee" containerName="registry-server" containerID="cri-o://4977d1df46377eea0789cd9b59e90636fb9d8457c4854f6aea5641718759427f" gracePeriod=2 Feb 16 12:36:53 crc kubenswrapper[4949]: I0216 12:36:53.269843 4949 generic.go:334] "Generic (PLEG): container finished" podID="30805ff6-6e05-4234-a24d-739d7b59a5ee" containerID="4977d1df46377eea0789cd9b59e90636fb9d8457c4854f6aea5641718759427f" exitCode=0 Feb 16 12:36:53 crc kubenswrapper[4949]: I0216 12:36:53.270052 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5ntc2" event={"ID":"30805ff6-6e05-4234-a24d-739d7b59a5ee","Type":"ContainerDied","Data":"4977d1df46377eea0789cd9b59e90636fb9d8457c4854f6aea5641718759427f"} Feb 16 12:36:53 crc kubenswrapper[4949]: I0216 12:36:53.270075 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5ntc2" 
event={"ID":"30805ff6-6e05-4234-a24d-739d7b59a5ee","Type":"ContainerDied","Data":"accd12bd37230107cdc6d8cea992b7635b4d45015162ed29473640deedbbb8ee"} Feb 16 12:36:53 crc kubenswrapper[4949]: I0216 12:36:53.270089 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="accd12bd37230107cdc6d8cea992b7635b4d45015162ed29473640deedbbb8ee" Feb 16 12:36:53 crc kubenswrapper[4949]: I0216 12:36:53.375209 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5ntc2" Feb 16 12:36:53 crc kubenswrapper[4949]: I0216 12:36:53.566092 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/30805ff6-6e05-4234-a24d-739d7b59a5ee-utilities\") pod \"30805ff6-6e05-4234-a24d-739d7b59a5ee\" (UID: \"30805ff6-6e05-4234-a24d-739d7b59a5ee\") " Feb 16 12:36:53 crc kubenswrapper[4949]: I0216 12:36:53.566664 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/30805ff6-6e05-4234-a24d-739d7b59a5ee-catalog-content\") pod \"30805ff6-6e05-4234-a24d-739d7b59a5ee\" (UID: \"30805ff6-6e05-4234-a24d-739d7b59a5ee\") " Feb 16 12:36:53 crc kubenswrapper[4949]: I0216 12:36:53.566815 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mqmhr\" (UniqueName: \"kubernetes.io/projected/30805ff6-6e05-4234-a24d-739d7b59a5ee-kube-api-access-mqmhr\") pod \"30805ff6-6e05-4234-a24d-739d7b59a5ee\" (UID: \"30805ff6-6e05-4234-a24d-739d7b59a5ee\") " Feb 16 12:36:53 crc kubenswrapper[4949]: I0216 12:36:53.567043 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/30805ff6-6e05-4234-a24d-739d7b59a5ee-utilities" (OuterVolumeSpecName: "utilities") pod "30805ff6-6e05-4234-a24d-739d7b59a5ee" (UID: "30805ff6-6e05-4234-a24d-739d7b59a5ee"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 12:36:53 crc kubenswrapper[4949]: I0216 12:36:53.568301 4949 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/30805ff6-6e05-4234-a24d-739d7b59a5ee-utilities\") on node \"crc\" DevicePath \"\"" Feb 16 12:36:53 crc kubenswrapper[4949]: I0216 12:36:53.612018 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/30805ff6-6e05-4234-a24d-739d7b59a5ee-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "30805ff6-6e05-4234-a24d-739d7b59a5ee" (UID: "30805ff6-6e05-4234-a24d-739d7b59a5ee"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 12:36:53 crc kubenswrapper[4949]: I0216 12:36:53.671629 4949 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/30805ff6-6e05-4234-a24d-739d7b59a5ee-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 16 12:36:54 crc kubenswrapper[4949]: I0216 12:36:54.122509 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/30805ff6-6e05-4234-a24d-739d7b59a5ee-kube-api-access-mqmhr" (OuterVolumeSpecName: "kube-api-access-mqmhr") pod "30805ff6-6e05-4234-a24d-739d7b59a5ee" (UID: "30805ff6-6e05-4234-a24d-739d7b59a5ee"). InnerVolumeSpecName "kube-api-access-mqmhr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 12:36:54 crc kubenswrapper[4949]: I0216 12:36:54.189661 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mqmhr\" (UniqueName: \"kubernetes.io/projected/30805ff6-6e05-4234-a24d-739d7b59a5ee-kube-api-access-mqmhr\") on node \"crc\" DevicePath \"\"" Feb 16 12:36:54 crc kubenswrapper[4949]: I0216 12:36:54.283125 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5ntc2" Feb 16 12:36:54 crc kubenswrapper[4949]: I0216 12:36:54.422348 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5ntc2"] Feb 16 12:36:54 crc kubenswrapper[4949]: I0216 12:36:54.433433 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-5ntc2"] Feb 16 12:36:55 crc kubenswrapper[4949]: I0216 12:36:55.254296 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="30805ff6-6e05-4234-a24d-739d7b59a5ee" path="/var/lib/kubelet/pods/30805ff6-6e05-4234-a24d-739d7b59a5ee/volumes" Feb 16 12:36:59 crc kubenswrapper[4949]: E0216 12:36:59.246256 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:37:04 crc kubenswrapper[4949]: E0216 12:37:04.241653 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:37:14 crc kubenswrapper[4949]: I0216 12:37:14.237436 4949 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 16 12:37:14 crc kubenswrapper[4949]: E0216 12:37:14.342637 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 16 12:37:14 crc kubenswrapper[4949]: E0216 12:37:14.342711 4949 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 16 12:37:14 crc kubenswrapper[4949]: E0216 12:37:14.342877 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n674h5dh7bh65bhcch65chc4h547h5d4h5c7h5dch5c8h74hb9h5f4hd8h79h7h59bh559h56bh9bhbch67bh68bh575h5cbh658h5bch7bhcch5d9q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8k7p7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(c69d7379-6f2b-45ae-8972-71e223a337a8): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 16 12:37:14 crc kubenswrapper[4949]: E0216 12:37:14.344484 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:37:16 crc kubenswrapper[4949]: E0216 12:37:16.237269 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:37:26 crc kubenswrapper[4949]: E0216 12:37:26.238698 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:37:30 crc kubenswrapper[4949]: E0216 12:37:30.379817 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 16 12:37:30 crc kubenswrapper[4949]: E0216 12:37:30.380434 4949 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 16 12:37:30 crc kubenswrapper[4949]: E0216 12:37:30.380628 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ksbml,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-5lgds_openstack(a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 16 12:37:30 crc kubenswrapper[4949]: E0216 12:37:30.383602 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:37:39 crc kubenswrapper[4949]: E0216 12:37:39.237978 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:37:42 crc kubenswrapper[4949]: E0216 12:37:42.237284 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:37:52 crc kubenswrapper[4949]: E0216 12:37:52.245462 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:37:56 crc kubenswrapper[4949]: E0216 12:37:56.237143 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:38:01 crc kubenswrapper[4949]: I0216 12:38:01.745286 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_46afdda5-3045-4445-bf9a-d040c4c9eac3/aodh-api/0.log" Feb 16 12:38:01 crc kubenswrapper[4949]: I0216 12:38:01.950349 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_46afdda5-3045-4445-bf9a-d040c4c9eac3/aodh-evaluator/0.log" Feb 16 12:38:01 crc kubenswrapper[4949]: I0216 12:38:01.993041 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_46afdda5-3045-4445-bf9a-d040c4c9eac3/aodh-listener/0.log" Feb 16 12:38:02 crc kubenswrapper[4949]: I0216 12:38:02.010710 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_46afdda5-3045-4445-bf9a-d040c4c9eac3/aodh-notifier/0.log" Feb 16 12:38:02 crc kubenswrapper[4949]: I0216 12:38:02.132984 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-6dd8f56bbd-txvgd_1a5a86a0-f27c-4c48-a9fb-2dfdca066751/barbican-api/0.log" Feb 16 12:38:02 crc kubenswrapper[4949]: I0216 12:38:02.171145 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-6dd8f56bbd-txvgd_1a5a86a0-f27c-4c48-a9fb-2dfdca066751/barbican-api-log/0.log" Feb 16 12:38:02 crc kubenswrapper[4949]: I0216 12:38:02.252595 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-64879558f4-wnmkq_942ae44c-f919-40ba-b0a0-eb112962e586/barbican-keystone-listener/0.log" Feb 16 12:38:02 crc kubenswrapper[4949]: I0216 12:38:02.379244 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-64879558f4-wnmkq_942ae44c-f919-40ba-b0a0-eb112962e586/barbican-keystone-listener-log/0.log" Feb 16 12:38:02 crc kubenswrapper[4949]: I0216 12:38:02.450621 
4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-6f5cdc9c5-rfrdp_52de476f-078d-48ff-a705-b647c492b187/barbican-worker/0.log" Feb 16 12:38:02 crc kubenswrapper[4949]: I0216 12:38:02.517018 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-6f5cdc9c5-rfrdp_52de476f-078d-48ff-a705-b647c492b187/barbican-worker-log/0.log" Feb 16 12:38:02 crc kubenswrapper[4949]: I0216 12:38:02.673094 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-qdqw2_f26a3c47-2f6f-481f-b344-964ee178b1d8/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Feb 16 12:38:02 crc kubenswrapper[4949]: I0216 12:38:02.916354 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_c69d7379-6f2b-45ae-8972-71e223a337a8/ceilometer-notification-agent/0.log" Feb 16 12:38:02 crc kubenswrapper[4949]: I0216 12:38:02.935069 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_c69d7379-6f2b-45ae-8972-71e223a337a8/sg-core/0.log" Feb 16 12:38:02 crc kubenswrapper[4949]: I0216 12:38:02.973209 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_c69d7379-6f2b-45ae-8972-71e223a337a8/proxy-httpd/0.log" Feb 16 12:38:03 crc kubenswrapper[4949]: I0216 12:38:03.372765 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_9962c0a5-0f9e-4564-95d0-f128685c2473/cinder-api-log/0.log" Feb 16 12:38:03 crc kubenswrapper[4949]: I0216 12:38:03.491806 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_9962c0a5-0f9e-4564-95d0-f128685c2473/cinder-api/0.log" Feb 16 12:38:03 crc kubenswrapper[4949]: I0216 12:38:03.508821 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_5842411c-0b82-455a-a060-0546d00907be/cinder-scheduler/0.log" Feb 16 12:38:03 crc kubenswrapper[4949]: I0216 12:38:03.673279 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_5842411c-0b82-455a-a060-0546d00907be/probe/0.log" Feb 16 12:38:03 crc kubenswrapper[4949]: I0216 12:38:03.724434 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-bb85b8995-fbntz_922556c8-4241-4be1-99cd-66eda9892b23/init/0.log" Feb 16 12:38:03 crc kubenswrapper[4949]: I0216 12:38:03.947804 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-bb85b8995-fbntz_922556c8-4241-4be1-99cd-66eda9892b23/init/0.log" Feb 16 12:38:03 crc kubenswrapper[4949]: I0216 12:38:03.958837 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-bb85b8995-fbntz_922556c8-4241-4be1-99cd-66eda9892b23/dnsmasq-dns/0.log" Feb 16 12:38:04 crc kubenswrapper[4949]: I0216 12:38:04.073948 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-c7g6c_d5a39a2b-f779-4ac6-86ee-db48d7305088/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Feb 16 12:38:04 crc kubenswrapper[4949]: I0216 12:38:04.249653 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-gqs69_9991be76-b16a-4afd-bcc6-05dc7dfe9da1/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Feb 16 12:38:04 crc kubenswrapper[4949]: I0216 12:38:04.333946 4949 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-p5g5b_4583cb4e-c70d-4638-a948-75a5f5cfc593/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Feb 16 12:38:04 crc kubenswrapper[4949]: I0216 12:38:04.585938 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-q89fn_5cd889af-3e25-4f40-bebf-f1861b7549ed/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Feb 16 12:38:04 crc kubenswrapper[4949]: I0216 12:38:04.656775 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-qgrk6_b4b77ec2-a1cb-437b-86a9-a9554e316035/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Feb 16 12:38:04 crc kubenswrapper[4949]: I0216 12:38:04.892718 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-r4kfr_1e96f01e-1b19-4190-9109-75322770d9ba/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Feb 16 12:38:04 crc kubenswrapper[4949]: I0216 12:38:04.938974 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-rgqdj_83b6a7c1-807f-4f19-b519-75879c54d0c5/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Feb 16 12:38:05 crc kubenswrapper[4949]: I0216 12:38:05.169416 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_b9a6f88b-57b3-4f64-a4aa-84d0529fdf82/glance-httpd/0.log" Feb 16 12:38:05 crc kubenswrapper[4949]: I0216 12:38:05.205813 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_b9a6f88b-57b3-4f64-a4aa-84d0529fdf82/glance-log/0.log" Feb 16 12:38:05 crc kubenswrapper[4949]: I0216 12:38:05.355470 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_394f965e-6650-4b88-91f3-b93a1bf0efa7/glance-httpd/0.log" Feb 16 12:38:05 crc kubenswrapper[4949]: I0216 12:38:05.388833 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_394f965e-6650-4b88-91f3-b93a1bf0efa7/glance-log/0.log" Feb 16 12:38:05 crc kubenswrapper[4949]: I0216 12:38:05.865051 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-api-5cdc74fffc-n2hcr_ecc029c7-0e9a-4211-8632-56c7b5e1b179/heat-api/0.log" Feb 16 12:38:06 crc kubenswrapper[4949]: I0216 12:38:06.119688 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-cfnapi-566c9b565f-fv7vz_c2936fd0-97ae-4028-a1b7-27feb4919790/heat-cfnapi/0.log" Feb 16 12:38:06 crc kubenswrapper[4949]: E0216 12:38:06.237453 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:38:06 crc kubenswrapper[4949]: I0216 12:38:06.356709 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-engine-7b8694cbb9-kdjtx_53ab577c-be20-45fa-9c92-52524f44c90a/heat-engine/0.log" Feb 16 12:38:06 crc kubenswrapper[4949]: I0216 12:38:06.504385 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-59b8c4c946-z72ns_cf7adfe6-84e8-4873-86b3-275c617e3917/keystone-api/0.log" Feb 16 12:38:06 crc 
kubenswrapper[4949]: I0216 12:38:06.579422 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29520721-mvkww_d40cbc9a-bb66-4817-9728-c46d635e4ed9/keystone-cron/0.log" Feb 16 12:38:07 crc kubenswrapper[4949]: I0216 12:38:07.461522 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_4d408b50-de06-4fdc-a945-afcd98fb4775/kube-state-metrics/0.log" Feb 16 12:38:07 crc kubenswrapper[4949]: I0216 12:38:07.518405 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mysqld-exporter-0_ecc4a304-dde4-4f96-9b54-e0df21ac37c3/mysqld-exporter/0.log" Feb 16 12:38:07 crc kubenswrapper[4949]: I0216 12:38:07.805394 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-cff99c4df-6j2pt_9dcea626-54bc-4dba-a5d5-6df79c77216a/neutron-httpd/0.log" Feb 16 12:38:07 crc kubenswrapper[4949]: I0216 12:38:07.814574 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-cff99c4df-6j2pt_9dcea626-54bc-4dba-a5d5-6df79c77216a/neutron-api/0.log" Feb 16 12:38:08 crc kubenswrapper[4949]: I0216 12:38:08.180739 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_4e78c7bd-f9e0-49da-83cb-e6bff985ad7d/nova-api-log/0.log" Feb 16 12:38:08 crc kubenswrapper[4949]: I0216 12:38:08.241426 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_0711e67d-8056-4831-b1a8-1ff9e0399a76/nova-cell0-conductor-conductor/0.log" Feb 16 12:38:08 crc kubenswrapper[4949]: I0216 12:38:08.565363 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_4e78c7bd-f9e0-49da-83cb-e6bff985ad7d/nova-api-api/0.log" Feb 16 12:38:08 crc kubenswrapper[4949]: I0216 12:38:08.571473 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_6395848e-1b80-4956-a3a6-80941876e3f9/nova-cell1-conductor-conductor/0.log" Feb 16 12:38:09 crc kubenswrapper[4949]: I0216 12:38:09.165288 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_4a3a859f-f5a1-4ca8-b71d-f526e24ac4b9/nova-metadata-log/0.log" Feb 16 12:38:09 crc kubenswrapper[4949]: I0216 12:38:09.263073 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_e7103704-b3b4-4de6-8ca7-b70b44b10cd6/nova-cell1-novncproxy-novncproxy/0.log" Feb 16 12:38:09 crc kubenswrapper[4949]: I0216 12:38:09.547842 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_9a5b2f17-57bf-4aad-b18c-d1ec47f358c9/mysql-bootstrap/0.log" Feb 16 12:38:09 crc kubenswrapper[4949]: I0216 12:38:09.592042 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_df3c5421-aaa8-41cb-9e45-fb91a87c89e9/nova-scheduler-scheduler/0.log" Feb 16 12:38:10 crc kubenswrapper[4949]: I0216 12:38:10.041880 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_9a5b2f17-57bf-4aad-b18c-d1ec47f358c9/mysql-bootstrap/0.log" Feb 16 12:38:10 crc kubenswrapper[4949]: I0216 12:38:10.067729 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_9a5b2f17-57bf-4aad-b18c-d1ec47f358c9/galera/0.log" Feb 16 12:38:10 crc kubenswrapper[4949]: E0216 12:38:10.239068 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:38:10 crc kubenswrapper[4949]: I0216 12:38:10.275576 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_f8511a9d-0c08-43c9-9243-f340d75fabe1/mysql-bootstrap/0.log" Feb 16 12:38:10 crc kubenswrapper[4949]: I0216 12:38:10.765480 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_f8511a9d-0c08-43c9-9243-f340d75fabe1/mysql-bootstrap/0.log" Feb 16 12:38:10 crc kubenswrapper[4949]: I0216 12:38:10.784578 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_f8511a9d-0c08-43c9-9243-f340d75fabe1/galera/0.log" Feb 16 12:38:10 crc kubenswrapper[4949]: I0216 12:38:10.998349 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_fea9f5ef-1f12-446a-ab82-50631b44a37f/openstackclient/0.log" Feb 16 12:38:11 crc kubenswrapper[4949]: I0216 12:38:11.172428 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-clw7g_dea77106-f4b1-4515-80bb-ebad1a6effcf/ovn-controller/0.log" Feb 16 12:38:11 crc kubenswrapper[4949]: I0216 12:38:11.301313 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-cswq4_99904114-3c39-45d5-84b6-35b9543bdf3a/openstack-network-exporter/0.log" Feb 16 12:38:11 crc kubenswrapper[4949]: I0216 12:38:11.507057 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_4a3a859f-f5a1-4ca8-b71d-f526e24ac4b9/nova-metadata-metadata/0.log" Feb 16 12:38:11 crc kubenswrapper[4949]: I0216 12:38:11.543941 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-fbgr9_9dc5fe0b-3ce0-4647-8c29-f17c359e9f4c/ovsdb-server-init/0.log" Feb 16 12:38:11 crc kubenswrapper[4949]: I0216 12:38:11.712023 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-fbgr9_9dc5fe0b-3ce0-4647-8c29-f17c359e9f4c/ovsdb-server-init/0.log" Feb 16 12:38:11 crc kubenswrapper[4949]: I0216 12:38:11.787578 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-fbgr9_9dc5fe0b-3ce0-4647-8c29-f17c359e9f4c/ovsdb-server/0.log" Feb 16 12:38:11 crc kubenswrapper[4949]: I0216 12:38:11.793457 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-fbgr9_9dc5fe0b-3ce0-4647-8c29-f17c359e9f4c/ovs-vswitchd/0.log" Feb 16 12:38:11 crc kubenswrapper[4949]: I0216 12:38:11.905156 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_e580bb4f-88f7-4c69-8eb5-669fc9733593/openstack-network-exporter/0.log" Feb 16 12:38:11 crc kubenswrapper[4949]: I0216 12:38:11.981654 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_e580bb4f-88f7-4c69-8eb5-669fc9733593/ovn-northd/0.log" Feb 16 12:38:12 crc kubenswrapper[4949]: I0216 12:38:12.091846 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_d9ba865a-34ea-4b4b-80a4-0d35a63dd064/openstack-network-exporter/0.log" Feb 16 12:38:12 crc kubenswrapper[4949]: I0216 12:38:12.111087 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_d9ba865a-34ea-4b4b-80a4-0d35a63dd064/ovsdbserver-nb/0.log" Feb 16 12:38:12 crc kubenswrapper[4949]: I0216 12:38:12.288477 4949 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openstack_ovsdbserver-sb-0_0c47e2a0-d831-46ee-a13a-93b4c487c4d9/openstack-network-exporter/0.log" Feb 16 12:38:12 crc kubenswrapper[4949]: I0216 12:38:12.361435 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_0c47e2a0-d831-46ee-a13a-93b4c487c4d9/ovsdbserver-sb/0.log" Feb 16 12:38:12 crc kubenswrapper[4949]: I0216 12:38:12.563900 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-8fd485cb6-bhx7b_a74bfcd5-7bd3-49aa-8e77-c1e17fc431dc/placement-api/0.log" Feb 16 12:38:12 crc kubenswrapper[4949]: I0216 12:38:12.630075 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-8fd485cb6-bhx7b_a74bfcd5-7bd3-49aa-8e77-c1e17fc431dc/placement-log/0.log" Feb 16 12:38:12 crc kubenswrapper[4949]: I0216 12:38:12.722510 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_e8565369-4065-464b-8f76-56b3689744e9/init-config-reloader/0.log" Feb 16 12:38:12 crc kubenswrapper[4949]: I0216 12:38:12.905022 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_e8565369-4065-464b-8f76-56b3689744e9/prometheus/0.log" Feb 16 12:38:12 crc kubenswrapper[4949]: I0216 12:38:12.948494 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_e8565369-4065-464b-8f76-56b3689744e9/config-reloader/0.log" Feb 16 12:38:12 crc kubenswrapper[4949]: I0216 12:38:12.951189 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_e8565369-4065-464b-8f76-56b3689744e9/init-config-reloader/0.log" Feb 16 12:38:12 crc kubenswrapper[4949]: I0216 12:38:12.992834 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_e8565369-4065-464b-8f76-56b3689744e9/thanos-sidecar/0.log" Feb 16 12:38:13 crc kubenswrapper[4949]: I0216 12:38:13.199355 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_a059bd07-34ae-4e84-8ffd-19eb56597b33/setup-container/0.log" Feb 16 12:38:13 crc kubenswrapper[4949]: I0216 12:38:13.381643 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_a059bd07-34ae-4e84-8ffd-19eb56597b33/setup-container/0.log" Feb 16 12:38:13 crc kubenswrapper[4949]: I0216 12:38:13.408865 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_a059bd07-34ae-4e84-8ffd-19eb56597b33/rabbitmq/0.log" Feb 16 12:38:13 crc kubenswrapper[4949]: I0216 12:38:13.438956 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_4664279d-8b37-426a-a677-a3e982fb6beb/setup-container/0.log" Feb 16 12:38:13 crc kubenswrapper[4949]: I0216 12:38:13.938667 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_4664279d-8b37-426a-a677-a3e982fb6beb/rabbitmq/0.log" Feb 16 12:38:13 crc kubenswrapper[4949]: I0216 12:38:13.952274 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_4664279d-8b37-426a-a677-a3e982fb6beb/setup-container/0.log" Feb 16 12:38:13 crc kubenswrapper[4949]: I0216 12:38:13.974411 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-1_e0597426-aa70-4dc2-a6dc-c6c2aeea1f27/setup-container/0.log" Feb 16 12:38:14 crc kubenswrapper[4949]: I0216 12:38:14.202737 4949 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_rabbitmq-server-1_e0597426-aa70-4dc2-a6dc-c6c2aeea1f27/setup-container/0.log" Feb 16 12:38:14 crc kubenswrapper[4949]: I0216 12:38:14.315833 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-2_701d055f-9bdd-4661-94ac-d8e04866c31f/setup-container/0.log" Feb 16 12:38:14 crc kubenswrapper[4949]: I0216 12:38:14.407816 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-1_e0597426-aa70-4dc2-a6dc-c6c2aeea1f27/rabbitmq/0.log" Feb 16 12:38:14 crc kubenswrapper[4949]: I0216 12:38:14.472478 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-2_701d055f-9bdd-4661-94ac-d8e04866c31f/setup-container/0.log" Feb 16 12:38:14 crc kubenswrapper[4949]: I0216 12:38:14.605141 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-2_701d055f-9bdd-4661-94ac-d8e04866c31f/rabbitmq/0.log" Feb 16 12:38:14 crc kubenswrapper[4949]: I0216 12:38:14.629995 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-f25z6_8bd581a9-2646-4045-bfb1-a0a4e356936d/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Feb 16 12:38:14 crc kubenswrapper[4949]: I0216 12:38:14.865082 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-g8nbv_86d7ec9a-5ff1-4d16-8a2a-d979cdb4154a/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Feb 16 12:38:15 crc kubenswrapper[4949]: I0216 12:38:15.009801 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-65dcd67cf9-prcpn_f27f5322-b5fc-4c02-ac21-6cf820ce08ce/proxy-server/0.log" Feb 16 12:38:15 crc kubenswrapper[4949]: I0216 12:38:15.072845 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-65dcd67cf9-prcpn_f27f5322-b5fc-4c02-ac21-6cf820ce08ce/proxy-httpd/0.log" Feb 16 12:38:15 crc kubenswrapper[4949]: I0216 12:38:15.266676 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b8fafaac-cbaa-4726-91b8-b0739034455f/account-auditor/0.log" Feb 16 12:38:15 crc kubenswrapper[4949]: I0216 12:38:15.269522 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-nvcsr_3aa21f38-ab8c-47b9-9ef0-f879e28eb01f/swift-ring-rebalance/0.log" Feb 16 12:38:15 crc kubenswrapper[4949]: I0216 12:38:15.304966 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b8fafaac-cbaa-4726-91b8-b0739034455f/account-reaper/0.log" Feb 16 12:38:15 crc kubenswrapper[4949]: I0216 12:38:15.468146 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b8fafaac-cbaa-4726-91b8-b0739034455f/account-server/0.log" Feb 16 12:38:15 crc kubenswrapper[4949]: I0216 12:38:15.476816 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b8fafaac-cbaa-4726-91b8-b0739034455f/account-replicator/0.log" Feb 16 12:38:15 crc kubenswrapper[4949]: I0216 12:38:15.523971 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b8fafaac-cbaa-4726-91b8-b0739034455f/container-replicator/0.log" Feb 16 12:38:15 crc kubenswrapper[4949]: I0216 12:38:15.584416 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b8fafaac-cbaa-4726-91b8-b0739034455f/container-auditor/0.log" Feb 16 12:38:15 crc kubenswrapper[4949]: I0216 12:38:15.672916 4949 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b8fafaac-cbaa-4726-91b8-b0739034455f/container-server/0.log" Feb 16 12:38:15 crc kubenswrapper[4949]: I0216 12:38:15.697323 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b8fafaac-cbaa-4726-91b8-b0739034455f/container-updater/0.log" Feb 16 12:38:15 crc kubenswrapper[4949]: I0216 12:38:15.763163 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b8fafaac-cbaa-4726-91b8-b0739034455f/object-auditor/0.log" Feb 16 12:38:15 crc kubenswrapper[4949]: I0216 12:38:15.822305 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b8fafaac-cbaa-4726-91b8-b0739034455f/object-expirer/0.log" Feb 16 12:38:15 crc kubenswrapper[4949]: I0216 12:38:15.890548 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b8fafaac-cbaa-4726-91b8-b0739034455f/object-replicator/0.log" Feb 16 12:38:15 crc kubenswrapper[4949]: I0216 12:38:15.930477 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b8fafaac-cbaa-4726-91b8-b0739034455f/object-server/0.log" Feb 16 12:38:16 crc kubenswrapper[4949]: I0216 12:38:16.069850 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b8fafaac-cbaa-4726-91b8-b0739034455f/rsync/0.log" Feb 16 12:38:16 crc kubenswrapper[4949]: I0216 12:38:16.078739 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b8fafaac-cbaa-4726-91b8-b0739034455f/object-updater/0.log" Feb 16 12:38:16 crc kubenswrapper[4949]: I0216 12:38:16.146890 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b8fafaac-cbaa-4726-91b8-b0739034455f/swift-recon-cron/0.log" Feb 16 12:38:21 crc kubenswrapper[4949]: E0216 12:38:21.256343 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:38:21 crc kubenswrapper[4949]: I0216 12:38:21.866525 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_03acf817-d9f3-4c65-b4c0-920136bc3d7b/memcached/0.log" Feb 16 12:38:23 crc kubenswrapper[4949]: E0216 12:38:23.237890 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:38:33 crc kubenswrapper[4949]: E0216 12:38:33.237130 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:38:38 crc kubenswrapper[4949]: E0216 12:38:38.242478 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:38:45 crc kubenswrapper[4949]: E0216 12:38:45.237769 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:38:46 crc kubenswrapper[4949]: I0216 12:38:46.251851 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-6d8bf5c495-zm28n_f0769aea-7db8-4dcb-bbde-8a4b918b3fa7/manager/0.log" Feb 16 12:38:46 crc kubenswrapper[4949]: I0216 12:38:46.473697 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_fb8767c25a457251b2669501481e586de5c4c83792e0dec9bfa5ebbd13bljwv_c531de36-0700-4513-84c9-ba4da7b9afde/util/0.log" Feb 16 12:38:47 crc kubenswrapper[4949]: I0216 12:38:47.209924 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_fb8767c25a457251b2669501481e586de5c4c83792e0dec9bfa5ebbd13bljwv_c531de36-0700-4513-84c9-ba4da7b9afde/pull/0.log" Feb 16 12:38:47 crc kubenswrapper[4949]: I0216 12:38:47.243327 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_fb8767c25a457251b2669501481e586de5c4c83792e0dec9bfa5ebbd13bljwv_c531de36-0700-4513-84c9-ba4da7b9afde/util/0.log" Feb 16 12:38:47 crc kubenswrapper[4949]: I0216 12:38:47.417166 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_fb8767c25a457251b2669501481e586de5c4c83792e0dec9bfa5ebbd13bljwv_c531de36-0700-4513-84c9-ba4da7b9afde/pull/0.log" Feb 16 12:38:47 crc kubenswrapper[4949]: I0216 12:38:47.596633 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_fb8767c25a457251b2669501481e586de5c4c83792e0dec9bfa5ebbd13bljwv_c531de36-0700-4513-84c9-ba4da7b9afde/util/0.log" Feb 16 12:38:47 crc kubenswrapper[4949]: I0216 12:38:47.637689 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_fb8767c25a457251b2669501481e586de5c4c83792e0dec9bfa5ebbd13bljwv_c531de36-0700-4513-84c9-ba4da7b9afde/pull/0.log" Feb 16 12:38:47 crc kubenswrapper[4949]: I0216 12:38:47.790884 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_fb8767c25a457251b2669501481e586de5c4c83792e0dec9bfa5ebbd13bljwv_c531de36-0700-4513-84c9-ba4da7b9afde/extract/0.log" Feb 16 12:38:48 crc kubenswrapper[4949]: I0216 12:38:48.094735 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-77987464f4-w59wt_482d88cb-1680-4276-8373-bf565231eadf/manager/0.log" Feb 16 12:38:48 crc kubenswrapper[4949]: I0216 12:38:48.535931 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-69f49c598c-r82d9_7927070f-ccfa-49c3-9a88-6fdee387c97c/manager/0.log" Feb 16 12:38:48 crc kubenswrapper[4949]: I0216 12:38:48.613677 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5b9b8895d5-j7vgc_6b381c9a-6963-419d-b96c-81ca6ea674d3/manager/0.log" Feb 16 12:38:48 crc kubenswrapper[4949]: I0216 12:38:48.839131 4949 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-5d946d989d-7ndss_14ac83e9-3142-4e62-b2a5-789822ea3013/manager/0.log" Feb 16 12:38:49 crc kubenswrapper[4949]: I0216 12:38:49.647392 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-554564d7fc-d928p_b81e3e22-4860-4765-9683-675c1fbbefef/manager/0.log" Feb 16 12:38:49 crc kubenswrapper[4949]: I0216 12:38:49.836780 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-79d975b745-6ms2x_4274f89e-0708-44ec-9bbb-3bb865c71d82/manager/0.log" Feb 16 12:38:49 crc kubenswrapper[4949]: I0216 12:38:49.944123 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-b4d948c87-57rt6_feaf7854-ba7b-4246-827d-941656a7f10b/manager/0.log" Feb 16 12:38:50 crc kubenswrapper[4949]: I0216 12:38:50.086052 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-54f6768c69-lzkxk_6c160a96-7508-45af-82b7-37ca399bb5af/manager/0.log" Feb 16 12:38:50 crc kubenswrapper[4949]: I0216 12:38:50.195587 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-6994f66f48-jv8z8_ad558c16-93d3-4e12-8feb-56516d331bbe/manager/0.log" Feb 16 12:38:50 crc kubenswrapper[4949]: I0216 12:38:50.405503 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-64ddbf8bb-wsh6f_70f95803-8857-4a40-b133-c12031e17f77/manager/0.log" Feb 16 12:38:50 crc kubenswrapper[4949]: I0216 12:38:50.550057 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-567668f5cf-cx2cw_20c60056-65e4-486e-8b5e-bf7aef44b9bc/manager/0.log" Feb 16 12:38:50 crc kubenswrapper[4949]: I0216 12:38:50.692444 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-7c6767dc9clpgjd_f583e5c3-96ec-417a-8d47-541896c301fb/manager/0.log" Feb 16 12:38:51 crc kubenswrapper[4949]: I0216 12:38:51.129948 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-init-55dffc8d68-xs4j5_af6b2570-6c23-4589-b650-e3a7db2cb482/operator/0.log" Feb 16 12:38:51 crc kubenswrapper[4949]: I0216 12:38:51.312240 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-llhm4_dd48c3ca-bfee-4c5d-8ddf-46e04dd300a9/registry-server/0.log" Feb 16 12:38:51 crc kubenswrapper[4949]: I0216 12:38:51.601862 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-d44cf6b75-vsknd_c179d330-538e-4fc6-afeb-bc3bfdf92569/manager/0.log" Feb 16 12:38:51 crc kubenswrapper[4949]: I0216 12:38:51.792765 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-8497b45c89-dsfbf_6f3a40af-b9a5-4709-8173-fb62a0d053e8/manager/0.log" Feb 16 12:38:51 crc kubenswrapper[4949]: I0216 12:38:51.984700 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-p9cl5_9a87a761-d2b8-4202-98c2-391fdb512cc4/operator/0.log" Feb 16 12:38:52 crc kubenswrapper[4949]: I0216 12:38:52.207792 4949 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_swift-operator-controller-manager-68f46476f-dk4s8_f6c8332e-9a8f-44dc-ac2b-8180c68d8f0f/manager/0.log" Feb 16 12:38:52 crc kubenswrapper[4949]: E0216 12:38:52.237431 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:38:52 crc kubenswrapper[4949]: I0216 12:38:52.643450 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-7866795846-f4l9p_10670483-76f3-4774-9aa4-a0c21ff5799e/manager/0.log" Feb 16 12:38:52 crc kubenswrapper[4949]: I0216 12:38:52.982030 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-6b65fbbb9f-7j44g_8ac656c0-2570-4fd7-acbb-96182ff1bc95/manager/0.log" Feb 16 12:38:53 crc kubenswrapper[4949]: I0216 12:38:53.017039 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-5db88f68c-kwgzs_06428bd6-f2fd-44ab-b71a-fe48ec54189d/manager/0.log" Feb 16 12:38:53 crc kubenswrapper[4949]: I0216 12:38:53.021112 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-64b85768bb-85t96_6c8f8a0e-a378-4b98-8c9b-e5180c97e088/manager/0.log" Feb 16 12:38:53 crc kubenswrapper[4949]: I0216 12:38:53.862949 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-69f8888797-4bjjf_d1e2149a-cad2-4e22-822c-d5afb8294a25/manager/0.log" Feb 16 12:38:57 crc kubenswrapper[4949]: E0216 12:38:57.236986 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:38:59 crc kubenswrapper[4949]: I0216 12:38:59.205708 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-868647ff47-blpwl_3608d840-be2f-478e-8252-e41f5480853a/manager/0.log" Feb 16 12:39:04 crc kubenswrapper[4949]: I0216 12:39:04.550129 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 16 12:39:04 crc kubenswrapper[4949]: I0216 12:39:04.550618 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 16 12:39:05 crc kubenswrapper[4949]: E0216 12:39:05.239538 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" 
pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:39:11 crc kubenswrapper[4949]: E0216 12:39:11.245506 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:39:15 crc kubenswrapper[4949]: I0216 12:39:15.247862 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-nnrft_20a94b4e-2cd0-430e-9f2f-e805706f3b3d/control-plane-machine-set-operator/0.log" Feb 16 12:39:15 crc kubenswrapper[4949]: I0216 12:39:15.910059 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-9ddvv_bd39da7f-e838-4de5-9fcb-afb858bfe8ca/kube-rbac-proxy/0.log" Feb 16 12:39:15 crc kubenswrapper[4949]: I0216 12:39:15.993689 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-9ddvv_bd39da7f-e838-4de5-9fcb-afb858bfe8ca/machine-api-operator/0.log" Feb 16 12:39:18 crc kubenswrapper[4949]: E0216 12:39:18.238393 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:39:23 crc kubenswrapper[4949]: E0216 12:39:23.238834 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:39:29 crc kubenswrapper[4949]: I0216 12:39:29.202615 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-858654f9db-vkmw4_2337a301-ac3a-4802-8b4a-0cbb713cb547/cert-manager-controller/0.log" Feb 16 12:39:29 crc kubenswrapper[4949]: I0216 12:39:29.458987 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-cf98fcc89-skb68_cd46089f-617c-42c5-b7af-c083934b98f8/cert-manager-cainjector/0.log" Feb 16 12:39:29 crc kubenswrapper[4949]: I0216 12:39:29.527535 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-687f57d79b-cdp5z_61c29aa4-a4a6-439c-9a73-fb9e237a09a2/cert-manager-webhook/0.log" Feb 16 12:39:31 crc kubenswrapper[4949]: E0216 12:39:31.251415 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:39:34 crc kubenswrapper[4949]: E0216 12:39:34.238660 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" 
pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:39:34 crc kubenswrapper[4949]: I0216 12:39:34.550773 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 16 12:39:34 crc kubenswrapper[4949]: I0216 12:39:34.550825 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 16 12:39:42 crc kubenswrapper[4949]: I0216 12:39:42.980635 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-5c78fc5d65-6zbjh_84824090-52b6-4a69-ad49-b441b666b14d/nmstate-console-plugin/0.log" Feb 16 12:39:43 crc kubenswrapper[4949]: I0216 12:39:43.160326 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-xtxbl_0e02bbc5-1e5b-41da-b16b-c42a001af050/nmstate-handler/0.log" Feb 16 12:39:43 crc kubenswrapper[4949]: I0216 12:39:43.224278 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-58c85c668d-rnprv_dab83125-a691-419c-b901-8476bc8881d4/kube-rbac-proxy/0.log" Feb 16 12:39:43 crc kubenswrapper[4949]: I0216 12:39:43.292313 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-58c85c668d-rnprv_dab83125-a691-419c-b901-8476bc8881d4/nmstate-metrics/0.log" Feb 16 12:39:43 crc kubenswrapper[4949]: I0216 12:39:43.403254 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-694c9596b7-z2z4x_8dac3bfe-a7f1-41bf-8bdf-9ca17fa87d15/nmstate-operator/0.log" Feb 16 12:39:43 crc kubenswrapper[4949]: I0216 12:39:43.478053 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-866bcb46dc-qbzzp_50b488d3-6c66-4a7d-baaf-e7b7c30ca1f1/nmstate-webhook/0.log" Feb 16 12:39:46 crc kubenswrapper[4949]: E0216 12:39:46.237825 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:39:47 crc kubenswrapper[4949]: E0216 12:39:47.239240 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:39:57 crc kubenswrapper[4949]: I0216 12:39:57.025462 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-85b64669c-vc5kk_b0fa6cb2-2288-43e5-bd0a-065e92f72ece/kube-rbac-proxy/0.log" Feb 16 12:39:57 crc kubenswrapper[4949]: I0216 12:39:57.103876 4949 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-85b64669c-vc5kk_b0fa6cb2-2288-43e5-bd0a-065e92f72ece/manager/0.log" Feb 16 12:40:00 crc kubenswrapper[4949]: E0216 12:40:00.238146 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:40:01 crc kubenswrapper[4949]: E0216 12:40:01.248698 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:40:04 crc kubenswrapper[4949]: I0216 12:40:04.550479 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 16 12:40:04 crc kubenswrapper[4949]: I0216 12:40:04.550749 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 16 12:40:04 crc kubenswrapper[4949]: I0216 12:40:04.550791 4949 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-26lss" Feb 16 12:40:04 crc kubenswrapper[4949]: I0216 12:40:04.551650 4949 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d10535a2c33e7c98837e7814d2faabb2c1191b7db54bf9280f66b8dd9f5db5f9"} pod="openshift-machine-config-operator/machine-config-daemon-26lss" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 16 12:40:04 crc kubenswrapper[4949]: I0216 12:40:04.551722 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" containerID="cri-o://d10535a2c33e7c98837e7814d2faabb2c1191b7db54bf9280f66b8dd9f5db5f9" gracePeriod=600 Feb 16 12:40:05 crc kubenswrapper[4949]: I0216 12:40:05.442831 4949 generic.go:334] "Generic (PLEG): container finished" podID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerID="d10535a2c33e7c98837e7814d2faabb2c1191b7db54bf9280f66b8dd9f5db5f9" exitCode=0 Feb 16 12:40:05 crc kubenswrapper[4949]: I0216 12:40:05.442920 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" event={"ID":"39ca5ab7-457c-4404-a3eb-f6acce74843b","Type":"ContainerDied","Data":"d10535a2c33e7c98837e7814d2faabb2c1191b7db54bf9280f66b8dd9f5db5f9"} Feb 16 12:40:05 crc kubenswrapper[4949]: I0216 12:40:05.443257 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" 
event={"ID":"39ca5ab7-457c-4404-a3eb-f6acce74843b","Type":"ContainerStarted","Data":"a788d46c22bcba34c69e62fd8bc394fd4dba565c14d59b49c30694ff0d3923f4"} Feb 16 12:40:05 crc kubenswrapper[4949]: I0216 12:40:05.443288 4949 scope.go:117] "RemoveContainer" containerID="db5b7272f3abee92a666dd8fd36656846a70280da77aae2fa351ddb1613efa83" Feb 16 12:40:12 crc kubenswrapper[4949]: I0216 12:40:12.001594 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-68bc856cb9-zgpcq_f5bc5773-497e-40be-ba68-82d49b1fd949/prometheus-operator/0.log" Feb 16 12:40:12 crc kubenswrapper[4949]: I0216 12:40:12.157460 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl_3e0ff572-a323-409b-be25-ad0bceff59a5/prometheus-operator-admission-webhook/0.log" Feb 16 12:40:12 crc kubenswrapper[4949]: I0216 12:40:12.250736 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq_e21a80a9-650f-42d0-9cd4-6aaa334423a3/prometheus-operator-admission-webhook/0.log" Feb 16 12:40:12 crc kubenswrapper[4949]: I0216 12:40:12.374811 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-59bdc8b94-sr5rv_0857cd6c-04ee-449a-88ed-99093185d7f5/operator/0.log" Feb 16 12:40:12 crc kubenswrapper[4949]: I0216 12:40:12.464382 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-ui-dashboards-66cbf594b5-qzxn6_f99782ba-1386-4f77-ba13-bb2fd7ab6935/observability-ui-dashboards/0.log" Feb 16 12:40:12 crc kubenswrapper[4949]: I0216 12:40:12.557421 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5bf474d74f-2l4xd_cf4e8b11-ca69-4d30-97d8-935339316048/perses-operator/0.log" Feb 16 12:40:14 crc kubenswrapper[4949]: E0216 12:40:14.237522 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:40:16 crc kubenswrapper[4949]: E0216 12:40:16.237259 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:40:27 crc kubenswrapper[4949]: E0216 12:40:27.237827 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:40:27 crc kubenswrapper[4949]: I0216 12:40:27.601793 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_cluster-logging-operator-c769fd969-wlw2b_1b22393b-f597-4dad-8eb6-587d7a82e31b/cluster-logging-operator/0.log" Feb 16 12:40:27 crc kubenswrapper[4949]: I0216 12:40:27.808130 4949 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-logging_collector-tv7fp_dfef0ca9-030f-4e8f-9804-ac2000c6bc75/collector/0.log" Feb 16 12:40:27 crc kubenswrapper[4949]: I0216 12:40:27.821867 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-compactor-0_a2cbb075-2878-4d94-adf5-c92888ff4c2a/loki-compactor/0.log" Feb 16 12:40:28 crc kubenswrapper[4949]: I0216 12:40:28.000978 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-distributor-5d5548c9f5-dl75n_52cb84ef-5333-451e-aa69-7af33124627b/loki-distributor/0.log" Feb 16 12:40:28 crc kubenswrapper[4949]: I0216 12:40:28.055616 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-7d9d97666-94s4k_4c4cf393-1f34-415d-bd8a-2cd87dc62593/gateway/0.log" Feb 16 12:40:28 crc kubenswrapper[4949]: I0216 12:40:28.126505 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-7d9d97666-94s4k_4c4cf393-1f34-415d-bd8a-2cd87dc62593/opa/0.log" Feb 16 12:40:28 crc kubenswrapper[4949]: I0216 12:40:28.203436 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-7d9d97666-nk5qs_46c6e5be-0462-40bf-ab5e-f052cb9163b6/gateway/0.log" Feb 16 12:40:28 crc kubenswrapper[4949]: I0216 12:40:28.220892 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-7d9d97666-nk5qs_46c6e5be-0462-40bf-ab5e-f052cb9163b6/opa/0.log" Feb 16 12:40:28 crc kubenswrapper[4949]: I0216 12:40:28.383153 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-index-gateway-0_05030bcd-7441-4c52-b653-e1f112e5d7ff/loki-index-gateway/0.log" Feb 16 12:40:28 crc kubenswrapper[4949]: I0216 12:40:28.447941 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-ingester-0_d60af4cc-ac63-40af-8a3a-2933fb25ffc2/loki-ingester/0.log" Feb 16 12:40:28 crc kubenswrapper[4949]: I0216 12:40:28.566688 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-querier-76bf7b6d45-gcct7_c8fca297-202a-44af-81f7-ecab29bc0472/loki-querier/0.log" Feb 16 12:40:28 crc kubenswrapper[4949]: I0216 12:40:28.662729 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-query-frontend-6d6859c548-8h6lw_27b4f51a-e116-4a53-adec-61ef733835ca/loki-query-frontend/0.log" Feb 16 12:40:29 crc kubenswrapper[4949]: E0216 12:40:29.240056 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:40:39 crc kubenswrapper[4949]: E0216 12:40:39.237984 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:40:40 crc kubenswrapper[4949]: E0216 12:40:40.237457 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:40:44 crc kubenswrapper[4949]: I0216 12:40:44.652252 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-69bbfbf88f-s6ht6_deae7c82-4cc7-4bb3-a96b-fd537f41fe89/kube-rbac-proxy/0.log" Feb 16 12:40:44 crc kubenswrapper[4949]: I0216 12:40:44.794471 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-69bbfbf88f-s6ht6_deae7c82-4cc7-4bb3-a96b-fd537f41fe89/controller/0.log" Feb 16 12:40:44 crc kubenswrapper[4949]: I0216 12:40:44.915802 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-knrks_e39d2618-f332-4236-9437-77af6dc23e3d/cp-frr-files/0.log" Feb 16 12:40:45 crc kubenswrapper[4949]: I0216 12:40:45.084152 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-knrks_e39d2618-f332-4236-9437-77af6dc23e3d/cp-frr-files/0.log" Feb 16 12:40:45 crc kubenswrapper[4949]: I0216 12:40:45.105570 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-knrks_e39d2618-f332-4236-9437-77af6dc23e3d/cp-reloader/0.log" Feb 16 12:40:45 crc kubenswrapper[4949]: I0216 12:40:45.137416 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-knrks_e39d2618-f332-4236-9437-77af6dc23e3d/cp-metrics/0.log" Feb 16 12:40:45 crc kubenswrapper[4949]: I0216 12:40:45.143153 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-knrks_e39d2618-f332-4236-9437-77af6dc23e3d/cp-reloader/0.log" Feb 16 12:40:45 crc kubenswrapper[4949]: I0216 12:40:45.359877 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-knrks_e39d2618-f332-4236-9437-77af6dc23e3d/cp-frr-files/0.log" Feb 16 12:40:45 crc kubenswrapper[4949]: I0216 12:40:45.409113 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-knrks_e39d2618-f332-4236-9437-77af6dc23e3d/cp-reloader/0.log" Feb 16 12:40:45 crc kubenswrapper[4949]: I0216 12:40:45.409242 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-knrks_e39d2618-f332-4236-9437-77af6dc23e3d/cp-metrics/0.log" Feb 16 12:40:45 crc kubenswrapper[4949]: I0216 12:40:45.417041 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-knrks_e39d2618-f332-4236-9437-77af6dc23e3d/cp-metrics/0.log" Feb 16 12:40:45 crc kubenswrapper[4949]: I0216 12:40:45.546054 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-knrks_e39d2618-f332-4236-9437-77af6dc23e3d/cp-frr-files/0.log" Feb 16 12:40:45 crc kubenswrapper[4949]: I0216 12:40:45.589125 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-knrks_e39d2618-f332-4236-9437-77af6dc23e3d/cp-metrics/0.log" Feb 16 12:40:45 crc kubenswrapper[4949]: I0216 12:40:45.591280 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-knrks_e39d2618-f332-4236-9437-77af6dc23e3d/cp-reloader/0.log" Feb 16 12:40:45 crc kubenswrapper[4949]: I0216 12:40:45.622840 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-knrks_e39d2618-f332-4236-9437-77af6dc23e3d/controller/0.log" Feb 16 12:40:45 crc kubenswrapper[4949]: I0216 12:40:45.790970 4949 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-knrks_e39d2618-f332-4236-9437-77af6dc23e3d/frr-metrics/0.log" Feb 16 12:40:45 crc kubenswrapper[4949]: I0216 12:40:45.797343 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-knrks_e39d2618-f332-4236-9437-77af6dc23e3d/kube-rbac-proxy/0.log" Feb 16 12:40:45 crc kubenswrapper[4949]: I0216 12:40:45.855937 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-knrks_e39d2618-f332-4236-9437-77af6dc23e3d/kube-rbac-proxy-frr/0.log" Feb 16 12:40:46 crc kubenswrapper[4949]: I0216 12:40:46.018366 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-knrks_e39d2618-f332-4236-9437-77af6dc23e3d/reloader/0.log" Feb 16 12:40:46 crc kubenswrapper[4949]: I0216 12:40:46.064617 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-78b44bf5bb-5tk44_2fc23a3f-060b-4441-9ed0-b9a6749338be/frr-k8s-webhook-server/0.log" Feb 16 12:40:46 crc kubenswrapper[4949]: I0216 12:40:46.244342 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-858d98cccb-ckn97_c8bcb521-dc2b-4646-898a-f488d0626ebb/manager/0.log" Feb 16 12:40:47 crc kubenswrapper[4949]: I0216 12:40:47.019701 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-7c75d8dfd-vlp9v_17fd589b-a008-4648-bf62-cf0bfceb4878/webhook-server/0.log" Feb 16 12:40:47 crc kubenswrapper[4949]: I0216 12:40:47.041699 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-vfbbw_f007c1dd-b910-41d6-96d9-1642b8eec8c3/kube-rbac-proxy/0.log" Feb 16 12:40:47 crc kubenswrapper[4949]: I0216 12:40:47.789094 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-knrks_e39d2618-f332-4236-9437-77af6dc23e3d/frr/0.log" Feb 16 12:40:47 crc kubenswrapper[4949]: I0216 12:40:47.880911 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-vfbbw_f007c1dd-b910-41d6-96d9-1642b8eec8c3/speaker/0.log" Feb 16 12:40:51 crc kubenswrapper[4949]: E0216 12:40:51.274940 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:40:52 crc kubenswrapper[4949]: E0216 12:40:52.239658 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:41:00 crc kubenswrapper[4949]: I0216 12:41:00.407333 4949 scope.go:117] "RemoveContainer" containerID="7cb19f4edb72150b0b03e97d8f56bd1a8b99cab1f268657fb5d8f0e37d292110" Feb 16 12:41:00 crc kubenswrapper[4949]: I0216 12:41:00.441587 4949 scope.go:117] "RemoveContainer" containerID="ef6d51d799e827ae1ae8cf82df6c401b57745b2f5166b0d9594d25dc112bb1ed" Feb 16 12:41:00 crc kubenswrapper[4949]: I0216 12:41:00.745826 4949 scope.go:117] "RemoveContainer" containerID="04a86b661731d11bcb96821ed67102df11d2d231632a7f326f98122a51524f85" Feb 16 12:41:02 crc kubenswrapper[4949]: I0216 12:41:02.722682 4949 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openshift-marketplace_371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19mjhj8_97f849a0-661e-46b3-981e-3933736a1cce/util/0.log" Feb 16 12:41:02 crc kubenswrapper[4949]: I0216 12:41:02.998393 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19mjhj8_97f849a0-661e-46b3-981e-3933736a1cce/util/0.log" Feb 16 12:41:03 crc kubenswrapper[4949]: I0216 12:41:03.051765 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19mjhj8_97f849a0-661e-46b3-981e-3933736a1cce/pull/0.log" Feb 16 12:41:03 crc kubenswrapper[4949]: I0216 12:41:03.173254 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19mjhj8_97f849a0-661e-46b3-981e-3933736a1cce/pull/0.log" Feb 16 12:41:03 crc kubenswrapper[4949]: I0216 12:41:03.401958 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19mjhj8_97f849a0-661e-46b3-981e-3933736a1cce/pull/0.log" Feb 16 12:41:03 crc kubenswrapper[4949]: I0216 12:41:03.417851 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19mjhj8_97f849a0-661e-46b3-981e-3933736a1cce/extract/0.log" Feb 16 12:41:03 crc kubenswrapper[4949]: I0216 12:41:03.424216 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19mjhj8_97f849a0-661e-46b3-981e-3933736a1cce/util/0.log" Feb 16 12:41:03 crc kubenswrapper[4949]: I0216 12:41:03.582910 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08zbjq8_a03698a2-f417-46be-a245-088d7a9a5ac5/util/0.log" Feb 16 12:41:03 crc kubenswrapper[4949]: I0216 12:41:03.814853 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08zbjq8_a03698a2-f417-46be-a245-088d7a9a5ac5/pull/0.log" Feb 16 12:41:03 crc kubenswrapper[4949]: I0216 12:41:03.842880 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08zbjq8_a03698a2-f417-46be-a245-088d7a9a5ac5/pull/0.log" Feb 16 12:41:03 crc kubenswrapper[4949]: I0216 12:41:03.894128 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08zbjq8_a03698a2-f417-46be-a245-088d7a9a5ac5/util/0.log" Feb 16 12:41:04 crc kubenswrapper[4949]: I0216 12:41:04.070189 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08zbjq8_a03698a2-f417-46be-a245-088d7a9a5ac5/pull/0.log" Feb 16 12:41:04 crc kubenswrapper[4949]: I0216 12:41:04.116967 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08zbjq8_a03698a2-f417-46be-a245-088d7a9a5ac5/util/0.log" Feb 16 12:41:04 crc kubenswrapper[4949]: I0216 12:41:04.134933 4949 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08zbjq8_a03698a2-f417-46be-a245-088d7a9a5ac5/extract/0.log" Feb 16 12:41:04 crc kubenswrapper[4949]: E0216 12:41:04.238779 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:41:04 crc kubenswrapper[4949]: I0216 12:41:04.263274 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213b862n_81192a09-60ad-4403-85e3-e4994c0f4bd3/util/0.log" Feb 16 12:41:04 crc kubenswrapper[4949]: I0216 12:41:04.537048 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213b862n_81192a09-60ad-4403-85e3-e4994c0f4bd3/pull/0.log" Feb 16 12:41:04 crc kubenswrapper[4949]: I0216 12:41:04.542704 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213b862n_81192a09-60ad-4403-85e3-e4994c0f4bd3/util/0.log" Feb 16 12:41:04 crc kubenswrapper[4949]: I0216 12:41:04.569655 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213b862n_81192a09-60ad-4403-85e3-e4994c0f4bd3/pull/0.log" Feb 16 12:41:04 crc kubenswrapper[4949]: I0216 12:41:04.759881 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213b862n_81192a09-60ad-4403-85e3-e4994c0f4bd3/pull/0.log" Feb 16 12:41:04 crc kubenswrapper[4949]: I0216 12:41:04.803081 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213b862n_81192a09-60ad-4403-85e3-e4994c0f4bd3/util/0.log" Feb 16 12:41:04 crc kubenswrapper[4949]: I0216 12:41:04.817920 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213b862n_81192a09-60ad-4403-85e3-e4994c0f4bd3/extract/0.log" Feb 16 12:41:04 crc kubenswrapper[4949]: I0216 12:41:04.963992 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-s6hs5_72ec1df3-0f00-466a-ae9c-77294ac6ed28/extract-utilities/0.log" Feb 16 12:41:05 crc kubenswrapper[4949]: I0216 12:41:05.216461 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-s6hs5_72ec1df3-0f00-466a-ae9c-77294ac6ed28/extract-content/0.log" Feb 16 12:41:05 crc kubenswrapper[4949]: I0216 12:41:05.217018 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-s6hs5_72ec1df3-0f00-466a-ae9c-77294ac6ed28/extract-content/0.log" Feb 16 12:41:05 crc kubenswrapper[4949]: I0216 12:41:05.229321 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-s6hs5_72ec1df3-0f00-466a-ae9c-77294ac6ed28/extract-utilities/0.log" Feb 16 12:41:05 crc kubenswrapper[4949]: I0216 12:41:05.676271 4949 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_certified-operators-s6hs5_72ec1df3-0f00-466a-ae9c-77294ac6ed28/extract-content/0.log" Feb 16 12:41:05 crc kubenswrapper[4949]: I0216 12:41:05.784066 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-s6hs5_72ec1df3-0f00-466a-ae9c-77294ac6ed28/extract-utilities/0.log" Feb 16 12:41:05 crc kubenswrapper[4949]: I0216 12:41:05.926376 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2ckdb_c3335bb3-fadb-4f0f-bf0f-5632510a5a06/extract-utilities/0.log" Feb 16 12:41:06 crc kubenswrapper[4949]: I0216 12:41:06.210801 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2ckdb_c3335bb3-fadb-4f0f-bf0f-5632510a5a06/extract-content/0.log" Feb 16 12:41:06 crc kubenswrapper[4949]: I0216 12:41:06.318879 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2ckdb_c3335bb3-fadb-4f0f-bf0f-5632510a5a06/extract-content/0.log" Feb 16 12:41:06 crc kubenswrapper[4949]: I0216 12:41:06.330985 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2ckdb_c3335bb3-fadb-4f0f-bf0f-5632510a5a06/extract-utilities/0.log" Feb 16 12:41:06 crc kubenswrapper[4949]: I0216 12:41:06.599397 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-s6hs5_72ec1df3-0f00-466a-ae9c-77294ac6ed28/registry-server/0.log" Feb 16 12:41:06 crc kubenswrapper[4949]: I0216 12:41:06.814941 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2ckdb_c3335bb3-fadb-4f0f-bf0f-5632510a5a06/extract-utilities/0.log" Feb 16 12:41:06 crc kubenswrapper[4949]: I0216 12:41:06.927809 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2ckdb_c3335bb3-fadb-4f0f-bf0f-5632510a5a06/extract-content/0.log" Feb 16 12:41:07 crc kubenswrapper[4949]: I0216 12:41:07.158516 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e08989d4dpd_70c09ca8-bdb5-446a-8b4f-f57accb479a5/util/0.log" Feb 16 12:41:07 crc kubenswrapper[4949]: E0216 12:41:07.272350 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:41:07 crc kubenswrapper[4949]: I0216 12:41:07.489240 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e08989d4dpd_70c09ca8-bdb5-446a-8b4f-f57accb479a5/util/0.log" Feb 16 12:41:07 crc kubenswrapper[4949]: I0216 12:41:07.569284 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e08989d4dpd_70c09ca8-bdb5-446a-8b4f-f57accb479a5/pull/0.log" Feb 16 12:41:07 crc kubenswrapper[4949]: I0216 12:41:07.569611 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e08989d4dpd_70c09ca8-bdb5-446a-8b4f-f57accb479a5/pull/0.log" Feb 16 12:41:07 crc kubenswrapper[4949]: I0216 12:41:07.628522 4949 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2ckdb_c3335bb3-fadb-4f0f-bf0f-5632510a5a06/registry-server/0.log" Feb 16 12:41:07 crc kubenswrapper[4949]: I0216 12:41:07.826003 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e08989d4dpd_70c09ca8-bdb5-446a-8b4f-f57accb479a5/extract/0.log" Feb 16 12:41:07 crc kubenswrapper[4949]: I0216 12:41:07.834430 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e08989d4dpd_70c09ca8-bdb5-446a-8b4f-f57accb479a5/util/0.log" Feb 16 12:41:07 crc kubenswrapper[4949]: I0216 12:41:07.836452 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e08989d4dpd_70c09ca8-bdb5-446a-8b4f-f57accb479a5/pull/0.log" Feb 16 12:41:08 crc kubenswrapper[4949]: I0216 12:41:08.517459 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecakpkd4_1d26d747-8606-4803-a80f-fe1b8ae10e24/util/0.log" Feb 16 12:41:08 crc kubenswrapper[4949]: I0216 12:41:08.758330 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecakpkd4_1d26d747-8606-4803-a80f-fe1b8ae10e24/pull/0.log" Feb 16 12:41:08 crc kubenswrapper[4949]: I0216 12:41:08.758750 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecakpkd4_1d26d747-8606-4803-a80f-fe1b8ae10e24/pull/0.log" Feb 16 12:41:08 crc kubenswrapper[4949]: I0216 12:41:08.770210 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecakpkd4_1d26d747-8606-4803-a80f-fe1b8ae10e24/util/0.log" Feb 16 12:41:08 crc kubenswrapper[4949]: I0216 12:41:08.911895 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecakpkd4_1d26d747-8606-4803-a80f-fe1b8ae10e24/util/0.log" Feb 16 12:41:08 crc kubenswrapper[4949]: I0216 12:41:08.987106 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecakpkd4_1d26d747-8606-4803-a80f-fe1b8ae10e24/extract/0.log" Feb 16 12:41:08 crc kubenswrapper[4949]: I0216 12:41:08.994507 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-pp4fp_80cb28ef-0fde-4224-84e3-df2e1ca5ffce/marketplace-operator/0.log" Feb 16 12:41:09 crc kubenswrapper[4949]: I0216 12:41:09.002507 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecakpkd4_1d26d747-8606-4803-a80f-fe1b8ae10e24/pull/0.log" Feb 16 12:41:09 crc kubenswrapper[4949]: I0216 12:41:09.193950 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-kp26g_78507519-366b-4876-8361-caa27244e918/extract-utilities/0.log" Feb 16 12:41:09 crc kubenswrapper[4949]: I0216 12:41:09.379378 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-kp26g_78507519-366b-4876-8361-caa27244e918/extract-content/0.log" Feb 16 12:41:09 crc kubenswrapper[4949]: I0216 
12:41:09.399667 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-kp26g_78507519-366b-4876-8361-caa27244e918/extract-content/0.log" Feb 16 12:41:09 crc kubenswrapper[4949]: I0216 12:41:09.410125 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-kp26g_78507519-366b-4876-8361-caa27244e918/extract-utilities/0.log" Feb 16 12:41:09 crc kubenswrapper[4949]: I0216 12:41:09.644292 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-kp26g_78507519-366b-4876-8361-caa27244e918/extract-utilities/0.log" Feb 16 12:41:09 crc kubenswrapper[4949]: I0216 12:41:09.658618 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-kp26g_78507519-366b-4876-8361-caa27244e918/extract-content/0.log" Feb 16 12:41:09 crc kubenswrapper[4949]: I0216 12:41:09.821746 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-kp26g_78507519-366b-4876-8361-caa27244e918/registry-server/0.log" Feb 16 12:41:09 crc kubenswrapper[4949]: I0216 12:41:09.976291 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-8lxg9_daf37823-eac2-462d-82ff-b54b8d3aaccd/extract-utilities/0.log" Feb 16 12:41:10 crc kubenswrapper[4949]: I0216 12:41:10.048536 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-8lxg9_daf37823-eac2-462d-82ff-b54b8d3aaccd/extract-content/0.log" Feb 16 12:41:10 crc kubenswrapper[4949]: I0216 12:41:10.051948 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-8lxg9_daf37823-eac2-462d-82ff-b54b8d3aaccd/extract-utilities/0.log" Feb 16 12:41:10 crc kubenswrapper[4949]: I0216 12:41:10.608324 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-8lxg9_daf37823-eac2-462d-82ff-b54b8d3aaccd/extract-content/0.log" Feb 16 12:41:10 crc kubenswrapper[4949]: I0216 12:41:10.807807 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-8lxg9_daf37823-eac2-462d-82ff-b54b8d3aaccd/extract-content/0.log" Feb 16 12:41:10 crc kubenswrapper[4949]: I0216 12:41:10.825595 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-8lxg9_daf37823-eac2-462d-82ff-b54b8d3aaccd/extract-utilities/0.log" Feb 16 12:41:11 crc kubenswrapper[4949]: I0216 12:41:11.552853 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-8lxg9_daf37823-eac2-462d-82ff-b54b8d3aaccd/registry-server/0.log" Feb 16 12:41:16 crc kubenswrapper[4949]: E0216 12:41:16.238379 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:41:22 crc kubenswrapper[4949]: E0216 12:41:22.238326 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" 
podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:41:26 crc kubenswrapper[4949]: I0216 12:41:26.073860 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-68bc856cb9-zgpcq_f5bc5773-497e-40be-ba68-82d49b1fd949/prometheus-operator/0.log" Feb 16 12:41:26 crc kubenswrapper[4949]: I0216 12:41:26.089627 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-7bccbc9f8c-954xl_3e0ff572-a323-409b-be25-ad0bceff59a5/prometheus-operator-admission-webhook/0.log" Feb 16 12:41:26 crc kubenswrapper[4949]: I0216 12:41:26.123759 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-7bccbc9f8c-c2kbq_e21a80a9-650f-42d0-9cd4-6aaa334423a3/prometheus-operator-admission-webhook/0.log" Feb 16 12:41:26 crc kubenswrapper[4949]: I0216 12:41:26.264830 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-ui-dashboards-66cbf594b5-qzxn6_f99782ba-1386-4f77-ba13-bb2fd7ab6935/observability-ui-dashboards/0.log" Feb 16 12:41:26 crc kubenswrapper[4949]: I0216 12:41:26.291201 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5bf474d74f-2l4xd_cf4e8b11-ca69-4d30-97d8-935339316048/perses-operator/0.log" Feb 16 12:41:26 crc kubenswrapper[4949]: I0216 12:41:26.301469 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-59bdc8b94-sr5rv_0857cd6c-04ee-449a-88ed-99093185d7f5/operator/0.log" Feb 16 12:41:31 crc kubenswrapper[4949]: E0216 12:41:31.244717 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:41:33 crc kubenswrapper[4949]: E0216 12:41:33.237848 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:41:42 crc kubenswrapper[4949]: I0216 12:41:42.549047 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-85b64669c-vc5kk_b0fa6cb2-2288-43e5-bd0a-065e92f72ece/kube-rbac-proxy/0.log" Feb 16 12:41:42 crc kubenswrapper[4949]: I0216 12:41:42.588629 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-85b64669c-vc5kk_b0fa6cb2-2288-43e5-bd0a-065e92f72ece/manager/0.log" Feb 16 12:41:43 crc kubenswrapper[4949]: E0216 12:41:43.239671 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:41:47 crc kubenswrapper[4949]: E0216 12:41:47.237114 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: 
\"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:41:57 crc kubenswrapper[4949]: E0216 12:41:57.237041 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:41:59 crc kubenswrapper[4949]: E0216 12:41:59.246811 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:42:04 crc kubenswrapper[4949]: I0216 12:42:04.550744 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 16 12:42:04 crc kubenswrapper[4949]: I0216 12:42:04.551518 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 16 12:42:11 crc kubenswrapper[4949]: E0216 12:42:11.256277 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:42:14 crc kubenswrapper[4949]: E0216 12:42:14.237450 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:42:26 crc kubenswrapper[4949]: E0216 12:42:26.246728 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:42:26 crc kubenswrapper[4949]: I0216 12:42:26.258151 4949 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 16 12:42:26 crc kubenswrapper[4949]: E0216 12:42:26.347165 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has 
expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 16 12:42:26 crc kubenswrapper[4949]: E0216 12:42:26.347228 4949 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 16 12:42:26 crc kubenswrapper[4949]: E0216 12:42:26.347364 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n674h5dh7bh65bhcch65chc4h547h5d4h5c7h5dch5c8h74hb9h5f4hd8h79h7h59bh559h56bh9bhbch67bh68bh575h5cbh658h5bch7bhcch5d9q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8k7p7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(c69d7379-6f2b-45ae-8972-71e223a337a8): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" logger="UnhandledError" Feb 16 12:42:26 crc kubenswrapper[4949]: E0216 12:42:26.348569 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:42:34 crc kubenswrapper[4949]: I0216 12:42:34.550353 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 16 12:42:34 crc kubenswrapper[4949]: I0216 12:42:34.550809 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 16 12:42:38 crc kubenswrapper[4949]: E0216 12:42:38.237708 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:42:40 crc kubenswrapper[4949]: E0216 12:42:40.366786 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 16 12:42:40 crc kubenswrapper[4949]: E0216 12:42:40.366867 4949 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 16 12:42:40 crc kubenswrapper[4949]: E0216 12:42:40.367001 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ksbml,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-5lgds_openstack(a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 16 12:42:40 crc kubenswrapper[4949]: E0216 12:42:40.368247 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:42:49 crc kubenswrapper[4949]: E0216 12:42:49.240380 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:42:55 crc kubenswrapper[4949]: E0216 12:42:55.240987 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:43:00 crc kubenswrapper[4949]: I0216 12:43:00.870889 4949 scope.go:117] "RemoveContainer" containerID="a885e1b189c781b3f6b1b3e7d49bf337c4656f379196c1e19e5f2672866853ab" Feb 16 12:43:00 crc kubenswrapper[4949]: I0216 12:43:00.903064 4949 scope.go:117] "RemoveContainer" containerID="953f7a0c2f631114f968e6690e90a872e3637d2efe170a0d0809bd56b5ca94a4" Feb 16 12:43:00 crc kubenswrapper[4949]: I0216 12:43:00.968131 4949 scope.go:117] "RemoveContainer" containerID="4977d1df46377eea0789cd9b59e90636fb9d8457c4854f6aea5641718759427f" Feb 16 12:43:01 crc kubenswrapper[4949]: I0216 12:43:01.022482 4949 scope.go:117] "RemoveContainer" containerID="e0ecfdb596d8e16dcb99e956083569640e1fdd56f0ec1d4b6f4dc939d13dbb6b" Feb 16 12:43:01 crc kubenswrapper[4949]: E0216 12:43:01.244575 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:43:04 crc kubenswrapper[4949]: I0216 12:43:04.551926 4949 patch_prober.go:28] interesting pod/machine-config-daemon-26lss container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 16 12:43:04 crc kubenswrapper[4949]: I0216 12:43:04.552449 4949 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 16 12:43:04 crc kubenswrapper[4949]: I0216 12:43:04.552489 4949 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-26lss" Feb 16 12:43:04 crc kubenswrapper[4949]: I0216 12:43:04.553305 4949 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a788d46c22bcba34c69e62fd8bc394fd4dba565c14d59b49c30694ff0d3923f4"} pod="openshift-machine-config-operator/machine-config-daemon-26lss" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 16 12:43:04 crc kubenswrapper[4949]: I0216 12:43:04.553363 4949 kuberuntime_container.go:808] "Killing container 
with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerName="machine-config-daemon" containerID="cri-o://a788d46c22bcba34c69e62fd8bc394fd4dba565c14d59b49c30694ff0d3923f4" gracePeriod=600 Feb 16 12:43:04 crc kubenswrapper[4949]: E0216 12:43:04.686539 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:43:05 crc kubenswrapper[4949]: I0216 12:43:05.361746 4949 generic.go:334] "Generic (PLEG): container finished" podID="39ca5ab7-457c-4404-a3eb-f6acce74843b" containerID="a788d46c22bcba34c69e62fd8bc394fd4dba565c14d59b49c30694ff0d3923f4" exitCode=0 Feb 16 12:43:05 crc kubenswrapper[4949]: I0216 12:43:05.361804 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-26lss" event={"ID":"39ca5ab7-457c-4404-a3eb-f6acce74843b","Type":"ContainerDied","Data":"a788d46c22bcba34c69e62fd8bc394fd4dba565c14d59b49c30694ff0d3923f4"} Feb 16 12:43:05 crc kubenswrapper[4949]: I0216 12:43:05.361843 4949 scope.go:117] "RemoveContainer" containerID="d10535a2c33e7c98837e7814d2faabb2c1191b7db54bf9280f66b8dd9f5db5f9" Feb 16 12:43:05 crc kubenswrapper[4949]: I0216 12:43:05.362887 4949 scope.go:117] "RemoveContainer" containerID="a788d46c22bcba34c69e62fd8bc394fd4dba565c14d59b49c30694ff0d3923f4" Feb 16 12:43:05 crc kubenswrapper[4949]: E0216 12:43:05.363373 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:43:10 crc kubenswrapper[4949]: E0216 12:43:10.238882 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:43:15 crc kubenswrapper[4949]: E0216 12:43:15.238467 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:43:20 crc kubenswrapper[4949]: I0216 12:43:20.235932 4949 scope.go:117] "RemoveContainer" containerID="a788d46c22bcba34c69e62fd8bc394fd4dba565c14d59b49c30694ff0d3923f4" Feb 16 12:43:20 crc kubenswrapper[4949]: E0216 12:43:20.236769 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:43:21 crc kubenswrapper[4949]: E0216 12:43:21.244875 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:43:27 crc kubenswrapper[4949]: E0216 12:43:27.242068 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:43:32 crc kubenswrapper[4949]: E0216 12:43:32.237560 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:43:33 crc kubenswrapper[4949]: I0216 12:43:33.235799 4949 scope.go:117] "RemoveContainer" containerID="a788d46c22bcba34c69e62fd8bc394fd4dba565c14d59b49c30694ff0d3923f4" Feb 16 12:43:33 crc kubenswrapper[4949]: E0216 12:43:33.236325 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:43:39 crc kubenswrapper[4949]: E0216 12:43:39.238818 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:43:42 crc kubenswrapper[4949]: I0216 12:43:42.780095 4949 generic.go:334] "Generic (PLEG): container finished" podID="462f2b95-35b2-4890-bba9-cb9c59356955" containerID="cc58e9f03385bc53a2f6e28aa1626183148747be37adb9c0fa215d67b1cce3c8" exitCode=0 Feb 16 12:43:42 crc kubenswrapper[4949]: I0216 12:43:42.780201 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-zc6qh/must-gather-8dsjg" event={"ID":"462f2b95-35b2-4890-bba9-cb9c59356955","Type":"ContainerDied","Data":"cc58e9f03385bc53a2f6e28aa1626183148747be37adb9c0fa215d67b1cce3c8"} Feb 16 12:43:42 crc kubenswrapper[4949]: I0216 12:43:42.781472 4949 scope.go:117] "RemoveContainer" containerID="cc58e9f03385bc53a2f6e28aa1626183148747be37adb9c0fa215d67b1cce3c8" Feb 16 12:43:43 crc kubenswrapper[4949]: I0216 12:43:43.642710 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-zc6qh_must-gather-8dsjg_462f2b95-35b2-4890-bba9-cb9c59356955/gather/0.log" Feb 16 12:43:45 crc kubenswrapper[4949]: E0216 12:43:45.236929 4949 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:43:47 crc kubenswrapper[4949]: I0216 12:43:47.236851 4949 scope.go:117] "RemoveContainer" containerID="a788d46c22bcba34c69e62fd8bc394fd4dba565c14d59b49c30694ff0d3923f4" Feb 16 12:43:47 crc kubenswrapper[4949]: E0216 12:43:47.237680 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:43:51 crc kubenswrapper[4949]: I0216 12:43:51.225337 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-zc6qh/must-gather-8dsjg"] Feb 16 12:43:51 crc kubenswrapper[4949]: I0216 12:43:51.226149 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-zc6qh/must-gather-8dsjg" podUID="462f2b95-35b2-4890-bba9-cb9c59356955" containerName="copy" containerID="cri-o://fd99e9c6e278a9fd6340338e3ff7b63eab2644a15e120e9b1c567673860be40e" gracePeriod=2 Feb 16 12:43:51 crc kubenswrapper[4949]: I0216 12:43:51.252363 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-zc6qh/must-gather-8dsjg"] Feb 16 12:43:51 crc kubenswrapper[4949]: I0216 12:43:51.901270 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-zc6qh_must-gather-8dsjg_462f2b95-35b2-4890-bba9-cb9c59356955/copy/0.log" Feb 16 12:43:51 crc kubenswrapper[4949]: I0216 12:43:51.902908 4949 generic.go:334] "Generic (PLEG): container finished" podID="462f2b95-35b2-4890-bba9-cb9c59356955" containerID="fd99e9c6e278a9fd6340338e3ff7b63eab2644a15e120e9b1c567673860be40e" exitCode=143 Feb 16 12:43:52 crc kubenswrapper[4949]: E0216 12:43:52.238260 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:43:52 crc kubenswrapper[4949]: I0216 12:43:52.262933 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-zc6qh_must-gather-8dsjg_462f2b95-35b2-4890-bba9-cb9c59356955/copy/0.log" Feb 16 12:43:52 crc kubenswrapper[4949]: I0216 12:43:52.263500 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-zc6qh/must-gather-8dsjg" Feb 16 12:43:52 crc kubenswrapper[4949]: I0216 12:43:52.390310 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j7vzn\" (UniqueName: \"kubernetes.io/projected/462f2b95-35b2-4890-bba9-cb9c59356955-kube-api-access-j7vzn\") pod \"462f2b95-35b2-4890-bba9-cb9c59356955\" (UID: \"462f2b95-35b2-4890-bba9-cb9c59356955\") " Feb 16 12:43:52 crc kubenswrapper[4949]: I0216 12:43:52.390466 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/462f2b95-35b2-4890-bba9-cb9c59356955-must-gather-output\") pod \"462f2b95-35b2-4890-bba9-cb9c59356955\" (UID: \"462f2b95-35b2-4890-bba9-cb9c59356955\") " Feb 16 12:43:52 crc kubenswrapper[4949]: I0216 12:43:52.405233 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/462f2b95-35b2-4890-bba9-cb9c59356955-kube-api-access-j7vzn" (OuterVolumeSpecName: "kube-api-access-j7vzn") pod "462f2b95-35b2-4890-bba9-cb9c59356955" (UID: "462f2b95-35b2-4890-bba9-cb9c59356955"). InnerVolumeSpecName "kube-api-access-j7vzn". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 12:43:52 crc kubenswrapper[4949]: I0216 12:43:52.493932 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j7vzn\" (UniqueName: \"kubernetes.io/projected/462f2b95-35b2-4890-bba9-cb9c59356955-kube-api-access-j7vzn\") on node \"crc\" DevicePath \"\"" Feb 16 12:43:52 crc kubenswrapper[4949]: I0216 12:43:52.590044 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/462f2b95-35b2-4890-bba9-cb9c59356955-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "462f2b95-35b2-4890-bba9-cb9c59356955" (UID: "462f2b95-35b2-4890-bba9-cb9c59356955"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 12:43:52 crc kubenswrapper[4949]: I0216 12:43:52.597445 4949 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/462f2b95-35b2-4890-bba9-cb9c59356955-must-gather-output\") on node \"crc\" DevicePath \"\"" Feb 16 12:43:52 crc kubenswrapper[4949]: I0216 12:43:52.915266 4949 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-zc6qh_must-gather-8dsjg_462f2b95-35b2-4890-bba9-cb9c59356955/copy/0.log" Feb 16 12:43:52 crc kubenswrapper[4949]: I0216 12:43:52.916331 4949 scope.go:117] "RemoveContainer" containerID="fd99e9c6e278a9fd6340338e3ff7b63eab2644a15e120e9b1c567673860be40e" Feb 16 12:43:52 crc kubenswrapper[4949]: I0216 12:43:52.916388 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-zc6qh/must-gather-8dsjg" Feb 16 12:43:52 crc kubenswrapper[4949]: I0216 12:43:52.957623 4949 scope.go:117] "RemoveContainer" containerID="cc58e9f03385bc53a2f6e28aa1626183148747be37adb9c0fa215d67b1cce3c8" Feb 16 12:43:53 crc kubenswrapper[4949]: I0216 12:43:53.276525 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="462f2b95-35b2-4890-bba9-cb9c59356955" path="/var/lib/kubelet/pods/462f2b95-35b2-4890-bba9-cb9c59356955/volumes" Feb 16 12:43:58 crc kubenswrapper[4949]: I0216 12:43:58.235811 4949 scope.go:117] "RemoveContainer" containerID="a788d46c22bcba34c69e62fd8bc394fd4dba565c14d59b49c30694ff0d3923f4" Feb 16 12:43:58 crc kubenswrapper[4949]: E0216 12:43:58.236657 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:44:00 crc kubenswrapper[4949]: E0216 12:44:00.238370 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:44:07 crc kubenswrapper[4949]: E0216 12:44:07.238731 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:44:12 crc kubenswrapper[4949]: E0216 12:44:12.237935 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:44:13 crc kubenswrapper[4949]: I0216 12:44:13.236628 4949 scope.go:117] "RemoveContainer" containerID="a788d46c22bcba34c69e62fd8bc394fd4dba565c14d59b49c30694ff0d3923f4" Feb 16 12:44:13 crc kubenswrapper[4949]: E0216 12:44:13.238075 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:44:22 crc kubenswrapper[4949]: E0216 12:44:22.240440 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:44:25 crc kubenswrapper[4949]: E0216 12:44:25.240862 4949 pod_workers.go:1301] 
"Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:44:28 crc kubenswrapper[4949]: I0216 12:44:28.235999 4949 scope.go:117] "RemoveContainer" containerID="a788d46c22bcba34c69e62fd8bc394fd4dba565c14d59b49c30694ff0d3923f4" Feb 16 12:44:28 crc kubenswrapper[4949]: E0216 12:44:28.236975 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:44:32 crc kubenswrapper[4949]: I0216 12:44:32.922580 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-qkgdb"] Feb 16 12:44:32 crc kubenswrapper[4949]: E0216 12:44:32.923957 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="462f2b95-35b2-4890-bba9-cb9c59356955" containerName="copy" Feb 16 12:44:32 crc kubenswrapper[4949]: I0216 12:44:32.923976 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="462f2b95-35b2-4890-bba9-cb9c59356955" containerName="copy" Feb 16 12:44:32 crc kubenswrapper[4949]: E0216 12:44:32.924010 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b70ab82e-eada-4f3e-9bb7-7c139df21770" containerName="container-00" Feb 16 12:44:32 crc kubenswrapper[4949]: I0216 12:44:32.924019 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="b70ab82e-eada-4f3e-9bb7-7c139df21770" containerName="container-00" Feb 16 12:44:32 crc kubenswrapper[4949]: E0216 12:44:32.924032 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30805ff6-6e05-4234-a24d-739d7b59a5ee" containerName="extract-content" Feb 16 12:44:32 crc kubenswrapper[4949]: I0216 12:44:32.924042 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="30805ff6-6e05-4234-a24d-739d7b59a5ee" containerName="extract-content" Feb 16 12:44:32 crc kubenswrapper[4949]: E0216 12:44:32.924072 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="462f2b95-35b2-4890-bba9-cb9c59356955" containerName="gather" Feb 16 12:44:32 crc kubenswrapper[4949]: I0216 12:44:32.924082 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="462f2b95-35b2-4890-bba9-cb9c59356955" containerName="gather" Feb 16 12:44:32 crc kubenswrapper[4949]: E0216 12:44:32.924103 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30805ff6-6e05-4234-a24d-739d7b59a5ee" containerName="registry-server" Feb 16 12:44:32 crc kubenswrapper[4949]: I0216 12:44:32.924113 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="30805ff6-6e05-4234-a24d-739d7b59a5ee" containerName="registry-server" Feb 16 12:44:32 crc kubenswrapper[4949]: E0216 12:44:32.924135 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30805ff6-6e05-4234-a24d-739d7b59a5ee" containerName="extract-utilities" Feb 16 12:44:32 crc kubenswrapper[4949]: I0216 12:44:32.924143 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="30805ff6-6e05-4234-a24d-739d7b59a5ee" containerName="extract-utilities" Feb 16 12:44:32 crc kubenswrapper[4949]: I0216 12:44:32.924444 
4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="462f2b95-35b2-4890-bba9-cb9c59356955" containerName="gather" Feb 16 12:44:32 crc kubenswrapper[4949]: I0216 12:44:32.924466 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="b70ab82e-eada-4f3e-9bb7-7c139df21770" containerName="container-00" Feb 16 12:44:32 crc kubenswrapper[4949]: I0216 12:44:32.924476 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="462f2b95-35b2-4890-bba9-cb9c59356955" containerName="copy" Feb 16 12:44:32 crc kubenswrapper[4949]: I0216 12:44:32.924510 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="30805ff6-6e05-4234-a24d-739d7b59a5ee" containerName="registry-server" Feb 16 12:44:32 crc kubenswrapper[4949]: I0216 12:44:32.926943 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qkgdb" Feb 16 12:44:32 crc kubenswrapper[4949]: I0216 12:44:32.942353 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qkgdb"] Feb 16 12:44:33 crc kubenswrapper[4949]: I0216 12:44:33.041222 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jvgb5\" (UniqueName: \"kubernetes.io/projected/f9cf16a7-248d-4d87-9421-cfee605379eb-kube-api-access-jvgb5\") pod \"redhat-marketplace-qkgdb\" (UID: \"f9cf16a7-248d-4d87-9421-cfee605379eb\") " pod="openshift-marketplace/redhat-marketplace-qkgdb" Feb 16 12:44:33 crc kubenswrapper[4949]: I0216 12:44:33.041716 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f9cf16a7-248d-4d87-9421-cfee605379eb-catalog-content\") pod \"redhat-marketplace-qkgdb\" (UID: \"f9cf16a7-248d-4d87-9421-cfee605379eb\") " pod="openshift-marketplace/redhat-marketplace-qkgdb" Feb 16 12:44:33 crc kubenswrapper[4949]: I0216 12:44:33.041957 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f9cf16a7-248d-4d87-9421-cfee605379eb-utilities\") pod \"redhat-marketplace-qkgdb\" (UID: \"f9cf16a7-248d-4d87-9421-cfee605379eb\") " pod="openshift-marketplace/redhat-marketplace-qkgdb" Feb 16 12:44:33 crc kubenswrapper[4949]: I0216 12:44:33.144110 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f9cf16a7-248d-4d87-9421-cfee605379eb-utilities\") pod \"redhat-marketplace-qkgdb\" (UID: \"f9cf16a7-248d-4d87-9421-cfee605379eb\") " pod="openshift-marketplace/redhat-marketplace-qkgdb" Feb 16 12:44:33 crc kubenswrapper[4949]: I0216 12:44:33.144298 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jvgb5\" (UniqueName: \"kubernetes.io/projected/f9cf16a7-248d-4d87-9421-cfee605379eb-kube-api-access-jvgb5\") pod \"redhat-marketplace-qkgdb\" (UID: \"f9cf16a7-248d-4d87-9421-cfee605379eb\") " pod="openshift-marketplace/redhat-marketplace-qkgdb" Feb 16 12:44:33 crc kubenswrapper[4949]: I0216 12:44:33.144467 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f9cf16a7-248d-4d87-9421-cfee605379eb-catalog-content\") pod \"redhat-marketplace-qkgdb\" (UID: \"f9cf16a7-248d-4d87-9421-cfee605379eb\") " pod="openshift-marketplace/redhat-marketplace-qkgdb" Feb 16 12:44:33 crc 
kubenswrapper[4949]: I0216 12:44:33.144666 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f9cf16a7-248d-4d87-9421-cfee605379eb-utilities\") pod \"redhat-marketplace-qkgdb\" (UID: \"f9cf16a7-248d-4d87-9421-cfee605379eb\") " pod="openshift-marketplace/redhat-marketplace-qkgdb" Feb 16 12:44:33 crc kubenswrapper[4949]: I0216 12:44:33.144914 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f9cf16a7-248d-4d87-9421-cfee605379eb-catalog-content\") pod \"redhat-marketplace-qkgdb\" (UID: \"f9cf16a7-248d-4d87-9421-cfee605379eb\") " pod="openshift-marketplace/redhat-marketplace-qkgdb" Feb 16 12:44:33 crc kubenswrapper[4949]: I0216 12:44:33.166586 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jvgb5\" (UniqueName: \"kubernetes.io/projected/f9cf16a7-248d-4d87-9421-cfee605379eb-kube-api-access-jvgb5\") pod \"redhat-marketplace-qkgdb\" (UID: \"f9cf16a7-248d-4d87-9421-cfee605379eb\") " pod="openshift-marketplace/redhat-marketplace-qkgdb" Feb 16 12:44:33 crc kubenswrapper[4949]: E0216 12:44:33.238447 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:44:33 crc kubenswrapper[4949]: I0216 12:44:33.255506 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qkgdb" Feb 16 12:44:33 crc kubenswrapper[4949]: I0216 12:44:33.868605 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qkgdb"] Feb 16 12:44:34 crc kubenswrapper[4949]: I0216 12:44:34.373293 4949 generic.go:334] "Generic (PLEG): container finished" podID="f9cf16a7-248d-4d87-9421-cfee605379eb" containerID="c63c6310d2706726f152599c14d95845b780bc360d4a8d3bfab5e9359f056aab" exitCode=0 Feb 16 12:44:34 crc kubenswrapper[4949]: I0216 12:44:34.373389 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qkgdb" event={"ID":"f9cf16a7-248d-4d87-9421-cfee605379eb","Type":"ContainerDied","Data":"c63c6310d2706726f152599c14d95845b780bc360d4a8d3bfab5e9359f056aab"} Feb 16 12:44:34 crc kubenswrapper[4949]: I0216 12:44:34.373682 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qkgdb" event={"ID":"f9cf16a7-248d-4d87-9421-cfee605379eb","Type":"ContainerStarted","Data":"1280ca14cc4b464eb0cb0517c9b2210718f9b64408a4baba276b4aea2f7dba65"} Feb 16 12:44:35 crc kubenswrapper[4949]: I0216 12:44:35.387050 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qkgdb" event={"ID":"f9cf16a7-248d-4d87-9421-cfee605379eb","Type":"ContainerStarted","Data":"2f385a3278bf8b12c4e8697319f7d7f5d6b5a5d11b55fc1dd4ebdc005e1f09f2"} Feb 16 12:44:36 crc kubenswrapper[4949]: I0216 12:44:36.397783 4949 generic.go:334] "Generic (PLEG): container finished" podID="f9cf16a7-248d-4d87-9421-cfee605379eb" containerID="2f385a3278bf8b12c4e8697319f7d7f5d6b5a5d11b55fc1dd4ebdc005e1f09f2" exitCode=0 Feb 16 12:44:36 crc kubenswrapper[4949]: I0216 12:44:36.397872 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-marketplace-qkgdb" event={"ID":"f9cf16a7-248d-4d87-9421-cfee605379eb","Type":"ContainerDied","Data":"2f385a3278bf8b12c4e8697319f7d7f5d6b5a5d11b55fc1dd4ebdc005e1f09f2"} Feb 16 12:44:37 crc kubenswrapper[4949]: I0216 12:44:37.413878 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qkgdb" event={"ID":"f9cf16a7-248d-4d87-9421-cfee605379eb","Type":"ContainerStarted","Data":"8e6aefeece14bbfeaddf4ea429f5d6e97096116e2b83c62a654787f9252e5901"} Feb 16 12:44:37 crc kubenswrapper[4949]: I0216 12:44:37.492574 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-qkgdb" podStartSLOduration=3.042251715 podStartE2EDuration="5.492550167s" podCreationTimestamp="2026-02-16 12:44:32 +0000 UTC" firstStartedPulling="2026-02-16 12:44:34.376349385 +0000 UTC m=+5864.005683550" lastFinishedPulling="2026-02-16 12:44:36.826647847 +0000 UTC m=+5866.455982002" observedRunningTime="2026-02-16 12:44:37.440380891 +0000 UTC m=+5867.069715116" watchObservedRunningTime="2026-02-16 12:44:37.492550167 +0000 UTC m=+5867.121884332" Feb 16 12:44:39 crc kubenswrapper[4949]: E0216 12:44:39.237896 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:44:41 crc kubenswrapper[4949]: I0216 12:44:41.248623 4949 scope.go:117] "RemoveContainer" containerID="a788d46c22bcba34c69e62fd8bc394fd4dba565c14d59b49c30694ff0d3923f4" Feb 16 12:44:41 crc kubenswrapper[4949]: E0216 12:44:41.249506 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:44:43 crc kubenswrapper[4949]: I0216 12:44:43.255869 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-qkgdb" Feb 16 12:44:43 crc kubenswrapper[4949]: I0216 12:44:43.256187 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-qkgdb" Feb 16 12:44:43 crc kubenswrapper[4949]: I0216 12:44:43.304789 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-qkgdb" Feb 16 12:44:43 crc kubenswrapper[4949]: I0216 12:44:43.527811 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-qkgdb" Feb 16 12:44:43 crc kubenswrapper[4949]: I0216 12:44:43.589474 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qkgdb"] Feb 16 12:44:45 crc kubenswrapper[4949]: E0216 12:44:45.237754 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" 
podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:44:45 crc kubenswrapper[4949]: I0216 12:44:45.494534 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-qkgdb" podUID="f9cf16a7-248d-4d87-9421-cfee605379eb" containerName="registry-server" containerID="cri-o://8e6aefeece14bbfeaddf4ea429f5d6e97096116e2b83c62a654787f9252e5901" gracePeriod=2 Feb 16 12:44:46 crc kubenswrapper[4949]: I0216 12:44:46.087522 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qkgdb" Feb 16 12:44:46 crc kubenswrapper[4949]: I0216 12:44:46.178714 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f9cf16a7-248d-4d87-9421-cfee605379eb-utilities\") pod \"f9cf16a7-248d-4d87-9421-cfee605379eb\" (UID: \"f9cf16a7-248d-4d87-9421-cfee605379eb\") " Feb 16 12:44:46 crc kubenswrapper[4949]: I0216 12:44:46.179032 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f9cf16a7-248d-4d87-9421-cfee605379eb-catalog-content\") pod \"f9cf16a7-248d-4d87-9421-cfee605379eb\" (UID: \"f9cf16a7-248d-4d87-9421-cfee605379eb\") " Feb 16 12:44:46 crc kubenswrapper[4949]: I0216 12:44:46.179222 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jvgb5\" (UniqueName: \"kubernetes.io/projected/f9cf16a7-248d-4d87-9421-cfee605379eb-kube-api-access-jvgb5\") pod \"f9cf16a7-248d-4d87-9421-cfee605379eb\" (UID: \"f9cf16a7-248d-4d87-9421-cfee605379eb\") " Feb 16 12:44:46 crc kubenswrapper[4949]: I0216 12:44:46.179493 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f9cf16a7-248d-4d87-9421-cfee605379eb-utilities" (OuterVolumeSpecName: "utilities") pod "f9cf16a7-248d-4d87-9421-cfee605379eb" (UID: "f9cf16a7-248d-4d87-9421-cfee605379eb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 12:44:46 crc kubenswrapper[4949]: I0216 12:44:46.180163 4949 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f9cf16a7-248d-4d87-9421-cfee605379eb-utilities\") on node \"crc\" DevicePath \"\"" Feb 16 12:44:46 crc kubenswrapper[4949]: I0216 12:44:46.185687 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f9cf16a7-248d-4d87-9421-cfee605379eb-kube-api-access-jvgb5" (OuterVolumeSpecName: "kube-api-access-jvgb5") pod "f9cf16a7-248d-4d87-9421-cfee605379eb" (UID: "f9cf16a7-248d-4d87-9421-cfee605379eb"). InnerVolumeSpecName "kube-api-access-jvgb5". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 12:44:46 crc kubenswrapper[4949]: I0216 12:44:46.205744 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f9cf16a7-248d-4d87-9421-cfee605379eb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f9cf16a7-248d-4d87-9421-cfee605379eb" (UID: "f9cf16a7-248d-4d87-9421-cfee605379eb"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 12:44:46 crc kubenswrapper[4949]: I0216 12:44:46.283401 4949 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f9cf16a7-248d-4d87-9421-cfee605379eb-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 16 12:44:46 crc kubenswrapper[4949]: I0216 12:44:46.284458 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jvgb5\" (UniqueName: \"kubernetes.io/projected/f9cf16a7-248d-4d87-9421-cfee605379eb-kube-api-access-jvgb5\") on node \"crc\" DevicePath \"\"" Feb 16 12:44:46 crc kubenswrapper[4949]: I0216 12:44:46.515574 4949 generic.go:334] "Generic (PLEG): container finished" podID="f9cf16a7-248d-4d87-9421-cfee605379eb" containerID="8e6aefeece14bbfeaddf4ea429f5d6e97096116e2b83c62a654787f9252e5901" exitCode=0 Feb 16 12:44:46 crc kubenswrapper[4949]: I0216 12:44:46.515661 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qkgdb" event={"ID":"f9cf16a7-248d-4d87-9421-cfee605379eb","Type":"ContainerDied","Data":"8e6aefeece14bbfeaddf4ea429f5d6e97096116e2b83c62a654787f9252e5901"} Feb 16 12:44:46 crc kubenswrapper[4949]: I0216 12:44:46.515709 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qkgdb" event={"ID":"f9cf16a7-248d-4d87-9421-cfee605379eb","Type":"ContainerDied","Data":"1280ca14cc4b464eb0cb0517c9b2210718f9b64408a4baba276b4aea2f7dba65"} Feb 16 12:44:46 crc kubenswrapper[4949]: I0216 12:44:46.515763 4949 scope.go:117] "RemoveContainer" containerID="8e6aefeece14bbfeaddf4ea429f5d6e97096116e2b83c62a654787f9252e5901" Feb 16 12:44:46 crc kubenswrapper[4949]: I0216 12:44:46.516130 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qkgdb" Feb 16 12:44:46 crc kubenswrapper[4949]: I0216 12:44:46.550374 4949 scope.go:117] "RemoveContainer" containerID="2f385a3278bf8b12c4e8697319f7d7f5d6b5a5d11b55fc1dd4ebdc005e1f09f2" Feb 16 12:44:46 crc kubenswrapper[4949]: I0216 12:44:46.563255 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qkgdb"] Feb 16 12:44:46 crc kubenswrapper[4949]: I0216 12:44:46.573198 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-qkgdb"] Feb 16 12:44:46 crc kubenswrapper[4949]: I0216 12:44:46.588142 4949 scope.go:117] "RemoveContainer" containerID="c63c6310d2706726f152599c14d95845b780bc360d4a8d3bfab5e9359f056aab" Feb 16 12:44:46 crc kubenswrapper[4949]: I0216 12:44:46.629045 4949 scope.go:117] "RemoveContainer" containerID="8e6aefeece14bbfeaddf4ea429f5d6e97096116e2b83c62a654787f9252e5901" Feb 16 12:44:46 crc kubenswrapper[4949]: E0216 12:44:46.629510 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8e6aefeece14bbfeaddf4ea429f5d6e97096116e2b83c62a654787f9252e5901\": container with ID starting with 8e6aefeece14bbfeaddf4ea429f5d6e97096116e2b83c62a654787f9252e5901 not found: ID does not exist" containerID="8e6aefeece14bbfeaddf4ea429f5d6e97096116e2b83c62a654787f9252e5901" Feb 16 12:44:46 crc kubenswrapper[4949]: I0216 12:44:46.629557 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e6aefeece14bbfeaddf4ea429f5d6e97096116e2b83c62a654787f9252e5901"} err="failed to get container status \"8e6aefeece14bbfeaddf4ea429f5d6e97096116e2b83c62a654787f9252e5901\": rpc error: code = NotFound desc = could not find container \"8e6aefeece14bbfeaddf4ea429f5d6e97096116e2b83c62a654787f9252e5901\": container with ID starting with 8e6aefeece14bbfeaddf4ea429f5d6e97096116e2b83c62a654787f9252e5901 not found: ID does not exist" Feb 16 12:44:46 crc kubenswrapper[4949]: I0216 12:44:46.629579 4949 scope.go:117] "RemoveContainer" containerID="2f385a3278bf8b12c4e8697319f7d7f5d6b5a5d11b55fc1dd4ebdc005e1f09f2" Feb 16 12:44:46 crc kubenswrapper[4949]: E0216 12:44:46.629823 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2f385a3278bf8b12c4e8697319f7d7f5d6b5a5d11b55fc1dd4ebdc005e1f09f2\": container with ID starting with 2f385a3278bf8b12c4e8697319f7d7f5d6b5a5d11b55fc1dd4ebdc005e1f09f2 not found: ID does not exist" containerID="2f385a3278bf8b12c4e8697319f7d7f5d6b5a5d11b55fc1dd4ebdc005e1f09f2" Feb 16 12:44:46 crc kubenswrapper[4949]: I0216 12:44:46.629852 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2f385a3278bf8b12c4e8697319f7d7f5d6b5a5d11b55fc1dd4ebdc005e1f09f2"} err="failed to get container status \"2f385a3278bf8b12c4e8697319f7d7f5d6b5a5d11b55fc1dd4ebdc005e1f09f2\": rpc error: code = NotFound desc = could not find container \"2f385a3278bf8b12c4e8697319f7d7f5d6b5a5d11b55fc1dd4ebdc005e1f09f2\": container with ID starting with 2f385a3278bf8b12c4e8697319f7d7f5d6b5a5d11b55fc1dd4ebdc005e1f09f2 not found: ID does not exist" Feb 16 12:44:46 crc kubenswrapper[4949]: I0216 12:44:46.629870 4949 scope.go:117] "RemoveContainer" containerID="c63c6310d2706726f152599c14d95845b780bc360d4a8d3bfab5e9359f056aab" Feb 16 12:44:46 crc kubenswrapper[4949]: E0216 12:44:46.630075 4949 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"c63c6310d2706726f152599c14d95845b780bc360d4a8d3bfab5e9359f056aab\": container with ID starting with c63c6310d2706726f152599c14d95845b780bc360d4a8d3bfab5e9359f056aab not found: ID does not exist" containerID="c63c6310d2706726f152599c14d95845b780bc360d4a8d3bfab5e9359f056aab" Feb 16 12:44:46 crc kubenswrapper[4949]: I0216 12:44:46.630103 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c63c6310d2706726f152599c14d95845b780bc360d4a8d3bfab5e9359f056aab"} err="failed to get container status \"c63c6310d2706726f152599c14d95845b780bc360d4a8d3bfab5e9359f056aab\": rpc error: code = NotFound desc = could not find container \"c63c6310d2706726f152599c14d95845b780bc360d4a8d3bfab5e9359f056aab\": container with ID starting with c63c6310d2706726f152599c14d95845b780bc360d4a8d3bfab5e9359f056aab not found: ID does not exist" Feb 16 12:44:47 crc kubenswrapper[4949]: I0216 12:44:47.254608 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f9cf16a7-248d-4d87-9421-cfee605379eb" path="/var/lib/kubelet/pods/f9cf16a7-248d-4d87-9421-cfee605379eb/volumes" Feb 16 12:44:53 crc kubenswrapper[4949]: I0216 12:44:53.235737 4949 scope.go:117] "RemoveContainer" containerID="a788d46c22bcba34c69e62fd8bc394fd4dba565c14d59b49c30694ff0d3923f4" Feb 16 12:44:53 crc kubenswrapper[4949]: E0216 12:44:53.236457 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:44:54 crc kubenswrapper[4949]: E0216 12:44:54.238712 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:44:55 crc kubenswrapper[4949]: I0216 12:44:55.320736 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-6kvq4"] Feb 16 12:44:55 crc kubenswrapper[4949]: E0216 12:44:55.321598 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9cf16a7-248d-4d87-9421-cfee605379eb" containerName="extract-utilities" Feb 16 12:44:55 crc kubenswrapper[4949]: I0216 12:44:55.321617 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9cf16a7-248d-4d87-9421-cfee605379eb" containerName="extract-utilities" Feb 16 12:44:55 crc kubenswrapper[4949]: E0216 12:44:55.321628 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9cf16a7-248d-4d87-9421-cfee605379eb" containerName="registry-server" Feb 16 12:44:55 crc kubenswrapper[4949]: I0216 12:44:55.321634 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9cf16a7-248d-4d87-9421-cfee605379eb" containerName="registry-server" Feb 16 12:44:55 crc kubenswrapper[4949]: E0216 12:44:55.321654 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9cf16a7-248d-4d87-9421-cfee605379eb" containerName="extract-content" Feb 16 12:44:55 crc kubenswrapper[4949]: I0216 12:44:55.321660 4949 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="f9cf16a7-248d-4d87-9421-cfee605379eb" containerName="extract-content" Feb 16 12:44:55 crc kubenswrapper[4949]: I0216 12:44:55.321948 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="f9cf16a7-248d-4d87-9421-cfee605379eb" containerName="registry-server" Feb 16 12:44:55 crc kubenswrapper[4949]: I0216 12:44:55.324420 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6kvq4" Feb 16 12:44:55 crc kubenswrapper[4949]: I0216 12:44:55.343495 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6kvq4"] Feb 16 12:44:55 crc kubenswrapper[4949]: I0216 12:44:55.418660 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hhq44\" (UniqueName: \"kubernetes.io/projected/634b03e4-cb23-49c8-95bc-0db608ea0594-kube-api-access-hhq44\") pod \"redhat-operators-6kvq4\" (UID: \"634b03e4-cb23-49c8-95bc-0db608ea0594\") " pod="openshift-marketplace/redhat-operators-6kvq4" Feb 16 12:44:55 crc kubenswrapper[4949]: I0216 12:44:55.418752 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/634b03e4-cb23-49c8-95bc-0db608ea0594-utilities\") pod \"redhat-operators-6kvq4\" (UID: \"634b03e4-cb23-49c8-95bc-0db608ea0594\") " pod="openshift-marketplace/redhat-operators-6kvq4" Feb 16 12:44:55 crc kubenswrapper[4949]: I0216 12:44:55.418993 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/634b03e4-cb23-49c8-95bc-0db608ea0594-catalog-content\") pod \"redhat-operators-6kvq4\" (UID: \"634b03e4-cb23-49c8-95bc-0db608ea0594\") " pod="openshift-marketplace/redhat-operators-6kvq4" Feb 16 12:44:55 crc kubenswrapper[4949]: I0216 12:44:55.521409 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hhq44\" (UniqueName: \"kubernetes.io/projected/634b03e4-cb23-49c8-95bc-0db608ea0594-kube-api-access-hhq44\") pod \"redhat-operators-6kvq4\" (UID: \"634b03e4-cb23-49c8-95bc-0db608ea0594\") " pod="openshift-marketplace/redhat-operators-6kvq4" Feb 16 12:44:55 crc kubenswrapper[4949]: I0216 12:44:55.521517 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/634b03e4-cb23-49c8-95bc-0db608ea0594-utilities\") pod \"redhat-operators-6kvq4\" (UID: \"634b03e4-cb23-49c8-95bc-0db608ea0594\") " pod="openshift-marketplace/redhat-operators-6kvq4" Feb 16 12:44:55 crc kubenswrapper[4949]: I0216 12:44:55.521765 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/634b03e4-cb23-49c8-95bc-0db608ea0594-catalog-content\") pod \"redhat-operators-6kvq4\" (UID: \"634b03e4-cb23-49c8-95bc-0db608ea0594\") " pod="openshift-marketplace/redhat-operators-6kvq4" Feb 16 12:44:55 crc kubenswrapper[4949]: I0216 12:44:55.522076 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/634b03e4-cb23-49c8-95bc-0db608ea0594-utilities\") pod \"redhat-operators-6kvq4\" (UID: \"634b03e4-cb23-49c8-95bc-0db608ea0594\") " pod="openshift-marketplace/redhat-operators-6kvq4" Feb 16 12:44:55 crc kubenswrapper[4949]: I0216 12:44:55.522165 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/634b03e4-cb23-49c8-95bc-0db608ea0594-catalog-content\") pod \"redhat-operators-6kvq4\" (UID: \"634b03e4-cb23-49c8-95bc-0db608ea0594\") " pod="openshift-marketplace/redhat-operators-6kvq4" Feb 16 12:44:55 crc kubenswrapper[4949]: I0216 12:44:55.541737 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hhq44\" (UniqueName: \"kubernetes.io/projected/634b03e4-cb23-49c8-95bc-0db608ea0594-kube-api-access-hhq44\") pod \"redhat-operators-6kvq4\" (UID: \"634b03e4-cb23-49c8-95bc-0db608ea0594\") " pod="openshift-marketplace/redhat-operators-6kvq4" Feb 16 12:44:55 crc kubenswrapper[4949]: I0216 12:44:55.644136 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6kvq4" Feb 16 12:44:56 crc kubenswrapper[4949]: I0216 12:44:56.174930 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6kvq4"] Feb 16 12:44:56 crc kubenswrapper[4949]: I0216 12:44:56.633724 4949 generic.go:334] "Generic (PLEG): container finished" podID="634b03e4-cb23-49c8-95bc-0db608ea0594" containerID="8fe636efc1790c54df944fa37ffb57630952ca3e9a34f8522135ba31628afbd9" exitCode=0 Feb 16 12:44:56 crc kubenswrapper[4949]: I0216 12:44:56.633817 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6kvq4" event={"ID":"634b03e4-cb23-49c8-95bc-0db608ea0594","Type":"ContainerDied","Data":"8fe636efc1790c54df944fa37ffb57630952ca3e9a34f8522135ba31628afbd9"} Feb 16 12:44:56 crc kubenswrapper[4949]: I0216 12:44:56.634805 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6kvq4" event={"ID":"634b03e4-cb23-49c8-95bc-0db608ea0594","Type":"ContainerStarted","Data":"b881de6dfee636b7b1199eaa121e512d49b21ceba1a16f16b4d729537a38c1c7"} Feb 16 12:44:57 crc kubenswrapper[4949]: I0216 12:44:57.647068 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6kvq4" event={"ID":"634b03e4-cb23-49c8-95bc-0db608ea0594","Type":"ContainerStarted","Data":"ebb62c17bb264722656fd7cd4fa6b1a88015e9beb2edbb030b79f52ee24f4d1e"} Feb 16 12:44:58 crc kubenswrapper[4949]: E0216 12:44:58.238073 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:45:00 crc kubenswrapper[4949]: I0216 12:45:00.188417 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29520765-fxwfc"] Feb 16 12:45:00 crc kubenswrapper[4949]: I0216 12:45:00.191088 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29520765-fxwfc" Feb 16 12:45:00 crc kubenswrapper[4949]: I0216 12:45:00.193968 4949 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Feb 16 12:45:00 crc kubenswrapper[4949]: I0216 12:45:00.194625 4949 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Feb 16 12:45:00 crc kubenswrapper[4949]: I0216 12:45:00.200353 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29520765-fxwfc"] Feb 16 12:45:00 crc kubenswrapper[4949]: I0216 12:45:00.383342 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tq4j9\" (UniqueName: \"kubernetes.io/projected/49e23739-17c8-4c00-9f18-e8329e71b744-kube-api-access-tq4j9\") pod \"collect-profiles-29520765-fxwfc\" (UID: \"49e23739-17c8-4c00-9f18-e8329e71b744\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520765-fxwfc" Feb 16 12:45:00 crc kubenswrapper[4949]: I0216 12:45:00.383610 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/49e23739-17c8-4c00-9f18-e8329e71b744-config-volume\") pod \"collect-profiles-29520765-fxwfc\" (UID: \"49e23739-17c8-4c00-9f18-e8329e71b744\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520765-fxwfc" Feb 16 12:45:00 crc kubenswrapper[4949]: I0216 12:45:00.384011 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/49e23739-17c8-4c00-9f18-e8329e71b744-secret-volume\") pod \"collect-profiles-29520765-fxwfc\" (UID: \"49e23739-17c8-4c00-9f18-e8329e71b744\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520765-fxwfc" Feb 16 12:45:00 crc kubenswrapper[4949]: I0216 12:45:00.486792 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/49e23739-17c8-4c00-9f18-e8329e71b744-config-volume\") pod \"collect-profiles-29520765-fxwfc\" (UID: \"49e23739-17c8-4c00-9f18-e8329e71b744\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520765-fxwfc" Feb 16 12:45:00 crc kubenswrapper[4949]: I0216 12:45:00.486916 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/49e23739-17c8-4c00-9f18-e8329e71b744-secret-volume\") pod \"collect-profiles-29520765-fxwfc\" (UID: \"49e23739-17c8-4c00-9f18-e8329e71b744\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520765-fxwfc" Feb 16 12:45:00 crc kubenswrapper[4949]: I0216 12:45:00.486992 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tq4j9\" (UniqueName: \"kubernetes.io/projected/49e23739-17c8-4c00-9f18-e8329e71b744-kube-api-access-tq4j9\") pod \"collect-profiles-29520765-fxwfc\" (UID: \"49e23739-17c8-4c00-9f18-e8329e71b744\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520765-fxwfc" Feb 16 12:45:00 crc kubenswrapper[4949]: I0216 12:45:00.488434 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/49e23739-17c8-4c00-9f18-e8329e71b744-config-volume\") pod 
\"collect-profiles-29520765-fxwfc\" (UID: \"49e23739-17c8-4c00-9f18-e8329e71b744\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520765-fxwfc" Feb 16 12:45:00 crc kubenswrapper[4949]: I0216 12:45:00.515712 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/49e23739-17c8-4c00-9f18-e8329e71b744-secret-volume\") pod \"collect-profiles-29520765-fxwfc\" (UID: \"49e23739-17c8-4c00-9f18-e8329e71b744\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520765-fxwfc" Feb 16 12:45:00 crc kubenswrapper[4949]: I0216 12:45:00.518545 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tq4j9\" (UniqueName: \"kubernetes.io/projected/49e23739-17c8-4c00-9f18-e8329e71b744-kube-api-access-tq4j9\") pod \"collect-profiles-29520765-fxwfc\" (UID: \"49e23739-17c8-4c00-9f18-e8329e71b744\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29520765-fxwfc" Feb 16 12:45:00 crc kubenswrapper[4949]: I0216 12:45:00.817335 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29520765-fxwfc" Feb 16 12:45:01 crc kubenswrapper[4949]: W0216 12:45:01.885316 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod49e23739_17c8_4c00_9f18_e8329e71b744.slice/crio-0171afe90ba1a9be916c32532ecb3c6fc24a4ba51e5ebd19b3a5212f5b03d6a1 WatchSource:0}: Error finding container 0171afe90ba1a9be916c32532ecb3c6fc24a4ba51e5ebd19b3a5212f5b03d6a1: Status 404 returned error can't find the container with id 0171afe90ba1a9be916c32532ecb3c6fc24a4ba51e5ebd19b3a5212f5b03d6a1 Feb 16 12:45:01 crc kubenswrapper[4949]: I0216 12:45:01.887446 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29520765-fxwfc"] Feb 16 12:45:02 crc kubenswrapper[4949]: I0216 12:45:02.704158 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29520765-fxwfc" event={"ID":"49e23739-17c8-4c00-9f18-e8329e71b744","Type":"ContainerStarted","Data":"9186d7a3fb626306d3c060a22f1f63ed5a73c534da1f7d6d57baa33e9db74f17"} Feb 16 12:45:02 crc kubenswrapper[4949]: I0216 12:45:02.704503 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29520765-fxwfc" event={"ID":"49e23739-17c8-4c00-9f18-e8329e71b744","Type":"ContainerStarted","Data":"0171afe90ba1a9be916c32532ecb3c6fc24a4ba51e5ebd19b3a5212f5b03d6a1"} Feb 16 12:45:02 crc kubenswrapper[4949]: I0216 12:45:02.727672 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29520765-fxwfc" podStartSLOduration=2.727630301 podStartE2EDuration="2.727630301s" podCreationTimestamp="2026-02-16 12:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-16 12:45:02.721139777 +0000 UTC m=+5892.350473942" watchObservedRunningTime="2026-02-16 12:45:02.727630301 +0000 UTC m=+5892.356964476" Feb 16 12:45:03 crc kubenswrapper[4949]: I0216 12:45:03.719770 4949 generic.go:334] "Generic (PLEG): container finished" podID="634b03e4-cb23-49c8-95bc-0db608ea0594" containerID="ebb62c17bb264722656fd7cd4fa6b1a88015e9beb2edbb030b79f52ee24f4d1e" exitCode=0 Feb 16 12:45:03 crc kubenswrapper[4949]: I0216 12:45:03.719853 
4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6kvq4" event={"ID":"634b03e4-cb23-49c8-95bc-0db608ea0594","Type":"ContainerDied","Data":"ebb62c17bb264722656fd7cd4fa6b1a88015e9beb2edbb030b79f52ee24f4d1e"} Feb 16 12:45:03 crc kubenswrapper[4949]: I0216 12:45:03.724034 4949 generic.go:334] "Generic (PLEG): container finished" podID="49e23739-17c8-4c00-9f18-e8329e71b744" containerID="9186d7a3fb626306d3c060a22f1f63ed5a73c534da1f7d6d57baa33e9db74f17" exitCode=0 Feb 16 12:45:03 crc kubenswrapper[4949]: I0216 12:45:03.724099 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29520765-fxwfc" event={"ID":"49e23739-17c8-4c00-9f18-e8329e71b744","Type":"ContainerDied","Data":"9186d7a3fb626306d3c060a22f1f63ed5a73c534da1f7d6d57baa33e9db74f17"} Feb 16 12:45:04 crc kubenswrapper[4949]: I0216 12:45:04.737755 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6kvq4" event={"ID":"634b03e4-cb23-49c8-95bc-0db608ea0594","Type":"ContainerStarted","Data":"59447693044ddcc295e96e15e83ec1c1460f8d03c678ea766f278b9a8c203cc0"} Feb 16 12:45:04 crc kubenswrapper[4949]: I0216 12:45:04.765414 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-6kvq4" podStartSLOduration=2.231860318 podStartE2EDuration="9.765393408s" podCreationTimestamp="2026-02-16 12:44:55 +0000 UTC" firstStartedPulling="2026-02-16 12:44:56.63633092 +0000 UTC m=+5886.265665085" lastFinishedPulling="2026-02-16 12:45:04.16986402 +0000 UTC m=+5893.799198175" observedRunningTime="2026-02-16 12:45:04.761092287 +0000 UTC m=+5894.390426462" watchObservedRunningTime="2026-02-16 12:45:04.765393408 +0000 UTC m=+5894.394727573" Feb 16 12:45:05 crc kubenswrapper[4949]: I0216 12:45:05.139414 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29520765-fxwfc" Feb 16 12:45:05 crc kubenswrapper[4949]: I0216 12:45:05.235255 4949 scope.go:117] "RemoveContainer" containerID="a788d46c22bcba34c69e62fd8bc394fd4dba565c14d59b49c30694ff0d3923f4" Feb 16 12:45:05 crc kubenswrapper[4949]: E0216 12:45:05.235791 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:45:05 crc kubenswrapper[4949]: I0216 12:45:05.306598 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/49e23739-17c8-4c00-9f18-e8329e71b744-secret-volume\") pod \"49e23739-17c8-4c00-9f18-e8329e71b744\" (UID: \"49e23739-17c8-4c00-9f18-e8329e71b744\") " Feb 16 12:45:05 crc kubenswrapper[4949]: I0216 12:45:05.307085 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tq4j9\" (UniqueName: \"kubernetes.io/projected/49e23739-17c8-4c00-9f18-e8329e71b744-kube-api-access-tq4j9\") pod \"49e23739-17c8-4c00-9f18-e8329e71b744\" (UID: \"49e23739-17c8-4c00-9f18-e8329e71b744\") " Feb 16 12:45:05 crc kubenswrapper[4949]: I0216 12:45:05.307438 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/49e23739-17c8-4c00-9f18-e8329e71b744-config-volume\") pod \"49e23739-17c8-4c00-9f18-e8329e71b744\" (UID: \"49e23739-17c8-4c00-9f18-e8329e71b744\") " Feb 16 12:45:05 crc kubenswrapper[4949]: I0216 12:45:05.307974 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49e23739-17c8-4c00-9f18-e8329e71b744-config-volume" (OuterVolumeSpecName: "config-volume") pod "49e23739-17c8-4c00-9f18-e8329e71b744" (UID: "49e23739-17c8-4c00-9f18-e8329e71b744"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 16 12:45:05 crc kubenswrapper[4949]: I0216 12:45:05.308701 4949 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/49e23739-17c8-4c00-9f18-e8329e71b744-config-volume\") on node \"crc\" DevicePath \"\"" Feb 16 12:45:05 crc kubenswrapper[4949]: I0216 12:45:05.313392 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49e23739-17c8-4c00-9f18-e8329e71b744-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "49e23739-17c8-4c00-9f18-e8329e71b744" (UID: "49e23739-17c8-4c00-9f18-e8329e71b744"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 16 12:45:05 crc kubenswrapper[4949]: I0216 12:45:05.313806 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49e23739-17c8-4c00-9f18-e8329e71b744-kube-api-access-tq4j9" (OuterVolumeSpecName: "kube-api-access-tq4j9") pod "49e23739-17c8-4c00-9f18-e8329e71b744" (UID: "49e23739-17c8-4c00-9f18-e8329e71b744"). InnerVolumeSpecName "kube-api-access-tq4j9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 12:45:05 crc kubenswrapper[4949]: I0216 12:45:05.412926 4949 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/49e23739-17c8-4c00-9f18-e8329e71b744-secret-volume\") on node \"crc\" DevicePath \"\"" Feb 16 12:45:05 crc kubenswrapper[4949]: I0216 12:45:05.412964 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tq4j9\" (UniqueName: \"kubernetes.io/projected/49e23739-17c8-4c00-9f18-e8329e71b744-kube-api-access-tq4j9\") on node \"crc\" DevicePath \"\"" Feb 16 12:45:05 crc kubenswrapper[4949]: I0216 12:45:05.644305 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-6kvq4" Feb 16 12:45:05 crc kubenswrapper[4949]: I0216 12:45:05.644389 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-6kvq4" Feb 16 12:45:05 crc kubenswrapper[4949]: I0216 12:45:05.748477 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29520765-fxwfc" event={"ID":"49e23739-17c8-4c00-9f18-e8329e71b744","Type":"ContainerDied","Data":"0171afe90ba1a9be916c32532ecb3c6fc24a4ba51e5ebd19b3a5212f5b03d6a1"} Feb 16 12:45:05 crc kubenswrapper[4949]: I0216 12:45:05.748524 4949 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0171afe90ba1a9be916c32532ecb3c6fc24a4ba51e5ebd19b3a5212f5b03d6a1" Feb 16 12:45:05 crc kubenswrapper[4949]: I0216 12:45:05.749329 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29520765-fxwfc" Feb 16 12:45:06 crc kubenswrapper[4949]: I0216 12:45:06.221411 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29520720-svnnl"] Feb 16 12:45:06 crc kubenswrapper[4949]: I0216 12:45:06.232231 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29520720-svnnl"] Feb 16 12:45:06 crc kubenswrapper[4949]: E0216 12:45:06.237998 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:45:06 crc kubenswrapper[4949]: I0216 12:45:06.699839 4949 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-6kvq4" podUID="634b03e4-cb23-49c8-95bc-0db608ea0594" containerName="registry-server" probeResult="failure" output=< Feb 16 12:45:06 crc kubenswrapper[4949]: timeout: failed to connect service ":50051" within 1s Feb 16 12:45:06 crc kubenswrapper[4949]: > Feb 16 12:45:07 crc kubenswrapper[4949]: I0216 12:45:07.249497 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="845dbfa2-42a8-4303-9679-b5e238392546" path="/var/lib/kubelet/pods/845dbfa2-42a8-4303-9679-b5e238392546/volumes" Feb 16 12:45:10 crc kubenswrapper[4949]: E0216 12:45:10.238107 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" 
pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:45:15 crc kubenswrapper[4949]: I0216 12:45:15.712371 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-6kvq4" Feb 16 12:45:15 crc kubenswrapper[4949]: I0216 12:45:15.760626 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-6kvq4" Feb 16 12:45:15 crc kubenswrapper[4949]: I0216 12:45:15.961367 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-6kvq4"] Feb 16 12:45:16 crc kubenswrapper[4949]: I0216 12:45:16.881361 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-6kvq4" podUID="634b03e4-cb23-49c8-95bc-0db608ea0594" containerName="registry-server" containerID="cri-o://59447693044ddcc295e96e15e83ec1c1460f8d03c678ea766f278b9a8c203cc0" gracePeriod=2 Feb 16 12:45:17 crc kubenswrapper[4949]: E0216 12:45:17.239257 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:45:17 crc kubenswrapper[4949]: I0216 12:45:17.466012 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6kvq4" Feb 16 12:45:17 crc kubenswrapper[4949]: I0216 12:45:17.648081 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hhq44\" (UniqueName: \"kubernetes.io/projected/634b03e4-cb23-49c8-95bc-0db608ea0594-kube-api-access-hhq44\") pod \"634b03e4-cb23-49c8-95bc-0db608ea0594\" (UID: \"634b03e4-cb23-49c8-95bc-0db608ea0594\") " Feb 16 12:45:17 crc kubenswrapper[4949]: I0216 12:45:17.648731 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/634b03e4-cb23-49c8-95bc-0db608ea0594-catalog-content\") pod \"634b03e4-cb23-49c8-95bc-0db608ea0594\" (UID: \"634b03e4-cb23-49c8-95bc-0db608ea0594\") " Feb 16 12:45:17 crc kubenswrapper[4949]: I0216 12:45:17.649048 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/634b03e4-cb23-49c8-95bc-0db608ea0594-utilities\") pod \"634b03e4-cb23-49c8-95bc-0db608ea0594\" (UID: \"634b03e4-cb23-49c8-95bc-0db608ea0594\") " Feb 16 12:45:17 crc kubenswrapper[4949]: I0216 12:45:17.650078 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/634b03e4-cb23-49c8-95bc-0db608ea0594-utilities" (OuterVolumeSpecName: "utilities") pod "634b03e4-cb23-49c8-95bc-0db608ea0594" (UID: "634b03e4-cb23-49c8-95bc-0db608ea0594"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 12:45:17 crc kubenswrapper[4949]: I0216 12:45:17.657942 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/634b03e4-cb23-49c8-95bc-0db608ea0594-kube-api-access-hhq44" (OuterVolumeSpecName: "kube-api-access-hhq44") pod "634b03e4-cb23-49c8-95bc-0db608ea0594" (UID: "634b03e4-cb23-49c8-95bc-0db608ea0594"). InnerVolumeSpecName "kube-api-access-hhq44". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 12:45:17 crc kubenswrapper[4949]: I0216 12:45:17.754668 4949 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/634b03e4-cb23-49c8-95bc-0db608ea0594-utilities\") on node \"crc\" DevicePath \"\"" Feb 16 12:45:17 crc kubenswrapper[4949]: I0216 12:45:17.754821 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hhq44\" (UniqueName: \"kubernetes.io/projected/634b03e4-cb23-49c8-95bc-0db608ea0594-kube-api-access-hhq44\") on node \"crc\" DevicePath \"\"" Feb 16 12:45:17 crc kubenswrapper[4949]: I0216 12:45:17.794320 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/634b03e4-cb23-49c8-95bc-0db608ea0594-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "634b03e4-cb23-49c8-95bc-0db608ea0594" (UID: "634b03e4-cb23-49c8-95bc-0db608ea0594"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 12:45:17 crc kubenswrapper[4949]: I0216 12:45:17.857116 4949 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/634b03e4-cb23-49c8-95bc-0db608ea0594-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 16 12:45:17 crc kubenswrapper[4949]: I0216 12:45:17.893720 4949 generic.go:334] "Generic (PLEG): container finished" podID="634b03e4-cb23-49c8-95bc-0db608ea0594" containerID="59447693044ddcc295e96e15e83ec1c1460f8d03c678ea766f278b9a8c203cc0" exitCode=0 Feb 16 12:45:17 crc kubenswrapper[4949]: I0216 12:45:17.893795 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6kvq4" event={"ID":"634b03e4-cb23-49c8-95bc-0db608ea0594","Type":"ContainerDied","Data":"59447693044ddcc295e96e15e83ec1c1460f8d03c678ea766f278b9a8c203cc0"} Feb 16 12:45:17 crc kubenswrapper[4949]: I0216 12:45:17.893820 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6kvq4" event={"ID":"634b03e4-cb23-49c8-95bc-0db608ea0594","Type":"ContainerDied","Data":"b881de6dfee636b7b1199eaa121e512d49b21ceba1a16f16b4d729537a38c1c7"} Feb 16 12:45:17 crc kubenswrapper[4949]: I0216 12:45:17.893836 4949 scope.go:117] "RemoveContainer" containerID="59447693044ddcc295e96e15e83ec1c1460f8d03c678ea766f278b9a8c203cc0" Feb 16 12:45:17 crc kubenswrapper[4949]: I0216 12:45:17.893957 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-6kvq4" Feb 16 12:45:17 crc kubenswrapper[4949]: I0216 12:45:17.927986 4949 scope.go:117] "RemoveContainer" containerID="ebb62c17bb264722656fd7cd4fa6b1a88015e9beb2edbb030b79f52ee24f4d1e" Feb 16 12:45:17 crc kubenswrapper[4949]: I0216 12:45:17.932909 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-6kvq4"] Feb 16 12:45:17 crc kubenswrapper[4949]: I0216 12:45:17.944589 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-6kvq4"] Feb 16 12:45:17 crc kubenswrapper[4949]: I0216 12:45:17.948628 4949 scope.go:117] "RemoveContainer" containerID="8fe636efc1790c54df944fa37ffb57630952ca3e9a34f8522135ba31628afbd9" Feb 16 12:45:18 crc kubenswrapper[4949]: I0216 12:45:18.011563 4949 scope.go:117] "RemoveContainer" containerID="59447693044ddcc295e96e15e83ec1c1460f8d03c678ea766f278b9a8c203cc0" Feb 16 12:45:18 crc kubenswrapper[4949]: E0216 12:45:18.012090 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"59447693044ddcc295e96e15e83ec1c1460f8d03c678ea766f278b9a8c203cc0\": container with ID starting with 59447693044ddcc295e96e15e83ec1c1460f8d03c678ea766f278b9a8c203cc0 not found: ID does not exist" containerID="59447693044ddcc295e96e15e83ec1c1460f8d03c678ea766f278b9a8c203cc0" Feb 16 12:45:18 crc kubenswrapper[4949]: I0216 12:45:18.012145 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"59447693044ddcc295e96e15e83ec1c1460f8d03c678ea766f278b9a8c203cc0"} err="failed to get container status \"59447693044ddcc295e96e15e83ec1c1460f8d03c678ea766f278b9a8c203cc0\": rpc error: code = NotFound desc = could not find container \"59447693044ddcc295e96e15e83ec1c1460f8d03c678ea766f278b9a8c203cc0\": container with ID starting with 59447693044ddcc295e96e15e83ec1c1460f8d03c678ea766f278b9a8c203cc0 not found: ID does not exist" Feb 16 12:45:18 crc kubenswrapper[4949]: I0216 12:45:18.012190 4949 scope.go:117] "RemoveContainer" containerID="ebb62c17bb264722656fd7cd4fa6b1a88015e9beb2edbb030b79f52ee24f4d1e" Feb 16 12:45:18 crc kubenswrapper[4949]: E0216 12:45:18.012480 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ebb62c17bb264722656fd7cd4fa6b1a88015e9beb2edbb030b79f52ee24f4d1e\": container with ID starting with ebb62c17bb264722656fd7cd4fa6b1a88015e9beb2edbb030b79f52ee24f4d1e not found: ID does not exist" containerID="ebb62c17bb264722656fd7cd4fa6b1a88015e9beb2edbb030b79f52ee24f4d1e" Feb 16 12:45:18 crc kubenswrapper[4949]: I0216 12:45:18.012502 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ebb62c17bb264722656fd7cd4fa6b1a88015e9beb2edbb030b79f52ee24f4d1e"} err="failed to get container status \"ebb62c17bb264722656fd7cd4fa6b1a88015e9beb2edbb030b79f52ee24f4d1e\": rpc error: code = NotFound desc = could not find container \"ebb62c17bb264722656fd7cd4fa6b1a88015e9beb2edbb030b79f52ee24f4d1e\": container with ID starting with ebb62c17bb264722656fd7cd4fa6b1a88015e9beb2edbb030b79f52ee24f4d1e not found: ID does not exist" Feb 16 12:45:18 crc kubenswrapper[4949]: I0216 12:45:18.012514 4949 scope.go:117] "RemoveContainer" containerID="8fe636efc1790c54df944fa37ffb57630952ca3e9a34f8522135ba31628afbd9" Feb 16 12:45:18 crc kubenswrapper[4949]: E0216 12:45:18.012742 4949 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"8fe636efc1790c54df944fa37ffb57630952ca3e9a34f8522135ba31628afbd9\": container with ID starting with 8fe636efc1790c54df944fa37ffb57630952ca3e9a34f8522135ba31628afbd9 not found: ID does not exist" containerID="8fe636efc1790c54df944fa37ffb57630952ca3e9a34f8522135ba31628afbd9" Feb 16 12:45:18 crc kubenswrapper[4949]: I0216 12:45:18.012787 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8fe636efc1790c54df944fa37ffb57630952ca3e9a34f8522135ba31628afbd9"} err="failed to get container status \"8fe636efc1790c54df944fa37ffb57630952ca3e9a34f8522135ba31628afbd9\": rpc error: code = NotFound desc = could not find container \"8fe636efc1790c54df944fa37ffb57630952ca3e9a34f8522135ba31628afbd9\": container with ID starting with 8fe636efc1790c54df944fa37ffb57630952ca3e9a34f8522135ba31628afbd9 not found: ID does not exist" Feb 16 12:45:19 crc kubenswrapper[4949]: I0216 12:45:19.252520 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="634b03e4-cb23-49c8-95bc-0db608ea0594" path="/var/lib/kubelet/pods/634b03e4-cb23-49c8-95bc-0db608ea0594/volumes" Feb 16 12:45:20 crc kubenswrapper[4949]: I0216 12:45:20.235066 4949 scope.go:117] "RemoveContainer" containerID="a788d46c22bcba34c69e62fd8bc394fd4dba565c14d59b49c30694ff0d3923f4" Feb 16 12:45:20 crc kubenswrapper[4949]: E0216 12:45:20.235461 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:45:25 crc kubenswrapper[4949]: E0216 12:45:25.237158 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:45:30 crc kubenswrapper[4949]: E0216 12:45:30.238380 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:45:35 crc kubenswrapper[4949]: I0216 12:45:35.235641 4949 scope.go:117] "RemoveContainer" containerID="a788d46c22bcba34c69e62fd8bc394fd4dba565c14d59b49c30694ff0d3923f4" Feb 16 12:45:35 crc kubenswrapper[4949]: E0216 12:45:35.236441 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:45:38 crc kubenswrapper[4949]: E0216 12:45:38.237984 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off 
pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:45:41 crc kubenswrapper[4949]: E0216 12:45:41.246355 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:45:46 crc kubenswrapper[4949]: I0216 12:45:46.235054 4949 scope.go:117] "RemoveContainer" containerID="a788d46c22bcba34c69e62fd8bc394fd4dba565c14d59b49c30694ff0d3923f4" Feb 16 12:45:46 crc kubenswrapper[4949]: E0216 12:45:46.235878 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:45:53 crc kubenswrapper[4949]: E0216 12:45:53.237304 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:45:54 crc kubenswrapper[4949]: E0216 12:45:54.237847 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:46:01 crc kubenswrapper[4949]: I0216 12:46:01.191521 4949 scope.go:117] "RemoveContainer" containerID="fd592049b83cf64e5528128ba158c16671ee833698c87b4d9ab48188e9c5aa83" Feb 16 12:46:01 crc kubenswrapper[4949]: I0216 12:46:01.243302 4949 scope.go:117] "RemoveContainer" containerID="a788d46c22bcba34c69e62fd8bc394fd4dba565c14d59b49c30694ff0d3923f4" Feb 16 12:46:01 crc kubenswrapper[4949]: E0216 12:46:01.243928 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:46:06 crc kubenswrapper[4949]: E0216 12:46:06.238738 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:46:08 crc kubenswrapper[4949]: E0216 12:46:08.237755 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:46:14 crc kubenswrapper[4949]: I0216 12:46:14.235699 4949 scope.go:117] "RemoveContainer" containerID="a788d46c22bcba34c69e62fd8bc394fd4dba565c14d59b49c30694ff0d3923f4" Feb 16 12:46:14 crc kubenswrapper[4949]: E0216 12:46:14.236781 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:46:19 crc kubenswrapper[4949]: E0216 12:46:19.242463 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:46:19 crc kubenswrapper[4949]: E0216 12:46:19.243462 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:46:27 crc kubenswrapper[4949]: I0216 12:46:27.236084 4949 scope.go:117] "RemoveContainer" containerID="a788d46c22bcba34c69e62fd8bc394fd4dba565c14d59b49c30694ff0d3923f4" Feb 16 12:46:27 crc kubenswrapper[4949]: E0216 12:46:27.236902 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:46:31 crc kubenswrapper[4949]: E0216 12:46:31.245575 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:46:34 crc kubenswrapper[4949]: E0216 12:46:34.237887 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:46:39 crc kubenswrapper[4949]: I0216 12:46:39.235413 4949 scope.go:117] "RemoveContainer" containerID="a788d46c22bcba34c69e62fd8bc394fd4dba565c14d59b49c30694ff0d3923f4" Feb 16 12:46:39 crc kubenswrapper[4949]: E0216 12:46:39.236236 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:46:42 crc kubenswrapper[4949]: E0216 12:46:42.238685 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:46:48 crc kubenswrapper[4949]: E0216 12:46:48.238473 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:46:54 crc kubenswrapper[4949]: I0216 12:46:54.237241 4949 scope.go:117] "RemoveContainer" containerID="a788d46c22bcba34c69e62fd8bc394fd4dba565c14d59b49c30694ff0d3923f4" Feb 16 12:46:54 crc kubenswrapper[4949]: E0216 12:46:54.237992 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:46:54 crc kubenswrapper[4949]: E0216 12:46:54.240001 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:47:00 crc kubenswrapper[4949]: E0216 12:47:00.237365 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:47:05 crc kubenswrapper[4949]: E0216 12:47:05.239940 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:47:09 crc kubenswrapper[4949]: I0216 12:47:09.237029 4949 scope.go:117] "RemoveContainer" containerID="a788d46c22bcba34c69e62fd8bc394fd4dba565c14d59b49c30694ff0d3923f4" Feb 16 12:47:09 crc kubenswrapper[4949]: E0216 12:47:09.251458 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:47:13 crc kubenswrapper[4949]: E0216 12:47:13.238078 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:47:17 crc kubenswrapper[4949]: I0216 12:47:17.805023 4949 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-zrlgd"] Feb 16 12:47:17 crc kubenswrapper[4949]: E0216 12:47:17.809230 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49e23739-17c8-4c00-9f18-e8329e71b744" containerName="collect-profiles" Feb 16 12:47:17 crc kubenswrapper[4949]: I0216 12:47:17.809418 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="49e23739-17c8-4c00-9f18-e8329e71b744" containerName="collect-profiles" Feb 16 12:47:17 crc kubenswrapper[4949]: E0216 12:47:17.809523 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="634b03e4-cb23-49c8-95bc-0db608ea0594" containerName="extract-content" Feb 16 12:47:17 crc kubenswrapper[4949]: I0216 12:47:17.809641 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="634b03e4-cb23-49c8-95bc-0db608ea0594" containerName="extract-content" Feb 16 12:47:17 crc kubenswrapper[4949]: E0216 12:47:17.809767 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="634b03e4-cb23-49c8-95bc-0db608ea0594" containerName="registry-server" Feb 16 12:47:17 crc kubenswrapper[4949]: I0216 12:47:17.809899 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="634b03e4-cb23-49c8-95bc-0db608ea0594" containerName="registry-server" Feb 16 12:47:17 crc kubenswrapper[4949]: E0216 12:47:17.810044 4949 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="634b03e4-cb23-49c8-95bc-0db608ea0594" containerName="extract-utilities" Feb 16 12:47:17 crc kubenswrapper[4949]: I0216 12:47:17.810168 4949 state_mem.go:107] "Deleted CPUSet assignment" podUID="634b03e4-cb23-49c8-95bc-0db608ea0594" containerName="extract-utilities" Feb 16 12:47:17 crc kubenswrapper[4949]: I0216 12:47:17.810699 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="634b03e4-cb23-49c8-95bc-0db608ea0594" containerName="registry-server" Feb 16 12:47:17 crc kubenswrapper[4949]: I0216 12:47:17.810849 4949 memory_manager.go:354] "RemoveStaleState removing state" podUID="49e23739-17c8-4c00-9f18-e8329e71b744" containerName="collect-profiles" Feb 16 12:47:17 crc kubenswrapper[4949]: I0216 12:47:17.816899 4949 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-zrlgd" Feb 16 12:47:17 crc kubenswrapper[4949]: I0216 12:47:17.825670 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zrlgd"] Feb 16 12:47:17 crc kubenswrapper[4949]: I0216 12:47:17.954319 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/43ed3b84-a0b0-454f-9281-351a67e87275-utilities\") pod \"certified-operators-zrlgd\" (UID: \"43ed3b84-a0b0-454f-9281-351a67e87275\") " pod="openshift-marketplace/certified-operators-zrlgd" Feb 16 12:47:17 crc kubenswrapper[4949]: I0216 12:47:17.954707 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/43ed3b84-a0b0-454f-9281-351a67e87275-catalog-content\") pod \"certified-operators-zrlgd\" (UID: \"43ed3b84-a0b0-454f-9281-351a67e87275\") " pod="openshift-marketplace/certified-operators-zrlgd" Feb 16 12:47:17 crc kubenswrapper[4949]: I0216 12:47:17.954742 4949 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-phxlj\" (UniqueName: \"kubernetes.io/projected/43ed3b84-a0b0-454f-9281-351a67e87275-kube-api-access-phxlj\") pod \"certified-operators-zrlgd\" (UID: \"43ed3b84-a0b0-454f-9281-351a67e87275\") " pod="openshift-marketplace/certified-operators-zrlgd" Feb 16 12:47:18 crc kubenswrapper[4949]: I0216 12:47:18.058045 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/43ed3b84-a0b0-454f-9281-351a67e87275-catalog-content\") pod \"certified-operators-zrlgd\" (UID: \"43ed3b84-a0b0-454f-9281-351a67e87275\") " pod="openshift-marketplace/certified-operators-zrlgd" Feb 16 12:47:18 crc kubenswrapper[4949]: I0216 12:47:18.058112 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-phxlj\" (UniqueName: \"kubernetes.io/projected/43ed3b84-a0b0-454f-9281-351a67e87275-kube-api-access-phxlj\") pod \"certified-operators-zrlgd\" (UID: \"43ed3b84-a0b0-454f-9281-351a67e87275\") " pod="openshift-marketplace/certified-operators-zrlgd" Feb 16 12:47:18 crc kubenswrapper[4949]: I0216 12:47:18.058392 4949 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/43ed3b84-a0b0-454f-9281-351a67e87275-utilities\") pod \"certified-operators-zrlgd\" (UID: \"43ed3b84-a0b0-454f-9281-351a67e87275\") " pod="openshift-marketplace/certified-operators-zrlgd" Feb 16 12:47:18 crc kubenswrapper[4949]: I0216 12:47:18.058569 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/43ed3b84-a0b0-454f-9281-351a67e87275-catalog-content\") pod \"certified-operators-zrlgd\" (UID: \"43ed3b84-a0b0-454f-9281-351a67e87275\") " pod="openshift-marketplace/certified-operators-zrlgd" Feb 16 12:47:18 crc kubenswrapper[4949]: I0216 12:47:18.058901 4949 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/43ed3b84-a0b0-454f-9281-351a67e87275-utilities\") pod \"certified-operators-zrlgd\" (UID: \"43ed3b84-a0b0-454f-9281-351a67e87275\") " pod="openshift-marketplace/certified-operators-zrlgd" Feb 16 12:47:18 crc kubenswrapper[4949]: I0216 12:47:18.079273 4949 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-phxlj\" (UniqueName: \"kubernetes.io/projected/43ed3b84-a0b0-454f-9281-351a67e87275-kube-api-access-phxlj\") pod \"certified-operators-zrlgd\" (UID: \"43ed3b84-a0b0-454f-9281-351a67e87275\") " pod="openshift-marketplace/certified-operators-zrlgd" Feb 16 12:47:18 crc kubenswrapper[4949]: I0216 12:47:18.180279 4949 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zrlgd" Feb 16 12:47:18 crc kubenswrapper[4949]: W0216 12:47:18.775147 4949 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod43ed3b84_a0b0_454f_9281_351a67e87275.slice/crio-ed285c3af358170413e98488c98d8228fff731567df2d10d31ae4e4753b884b9 WatchSource:0}: Error finding container ed285c3af358170413e98488c98d8228fff731567df2d10d31ae4e4753b884b9: Status 404 returned error can't find the container with id ed285c3af358170413e98488c98d8228fff731567df2d10d31ae4e4753b884b9 Feb 16 12:47:18 crc kubenswrapper[4949]: I0216 12:47:18.778536 4949 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zrlgd"] Feb 16 12:47:19 crc kubenswrapper[4949]: E0216 12:47:19.238374 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:47:19 crc kubenswrapper[4949]: I0216 12:47:19.302037 4949 generic.go:334] "Generic (PLEG): container finished" podID="43ed3b84-a0b0-454f-9281-351a67e87275" containerID="0f1079c1754dec6b156561b18e39ab2a552af937b4b71d032b087815c4fcd164" exitCode=0 Feb 16 12:47:19 crc kubenswrapper[4949]: I0216 12:47:19.302084 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zrlgd" event={"ID":"43ed3b84-a0b0-454f-9281-351a67e87275","Type":"ContainerDied","Data":"0f1079c1754dec6b156561b18e39ab2a552af937b4b71d032b087815c4fcd164"} Feb 16 12:47:19 crc kubenswrapper[4949]: I0216 12:47:19.302116 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zrlgd" event={"ID":"43ed3b84-a0b0-454f-9281-351a67e87275","Type":"ContainerStarted","Data":"ed285c3af358170413e98488c98d8228fff731567df2d10d31ae4e4753b884b9"} Feb 16 12:47:20 crc kubenswrapper[4949]: I0216 12:47:20.315130 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zrlgd" event={"ID":"43ed3b84-a0b0-454f-9281-351a67e87275","Type":"ContainerStarted","Data":"e353e6c32d849e94fccf5098b820d377effbca9e3c3c1c449891d23708a1f9ff"} Feb 16 12:47:22 crc kubenswrapper[4949]: I0216 12:47:22.337297 4949 generic.go:334] "Generic (PLEG): container finished" podID="43ed3b84-a0b0-454f-9281-351a67e87275" containerID="e353e6c32d849e94fccf5098b820d377effbca9e3c3c1c449891d23708a1f9ff" exitCode=0 Feb 16 12:47:22 crc kubenswrapper[4949]: I0216 12:47:22.337414 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zrlgd" event={"ID":"43ed3b84-a0b0-454f-9281-351a67e87275","Type":"ContainerDied","Data":"e353e6c32d849e94fccf5098b820d377effbca9e3c3c1c449891d23708a1f9ff"} Feb 16 12:47:23 crc kubenswrapper[4949]: I0216 12:47:23.236057 4949 scope.go:117] "RemoveContainer" 
containerID="a788d46c22bcba34c69e62fd8bc394fd4dba565c14d59b49c30694ff0d3923f4" Feb 16 12:47:23 crc kubenswrapper[4949]: E0216 12:47:23.236842 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:47:23 crc kubenswrapper[4949]: I0216 12:47:23.354878 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zrlgd" event={"ID":"43ed3b84-a0b0-454f-9281-351a67e87275","Type":"ContainerStarted","Data":"5d19acf4cadd18c006b35c0bd0e149824e1567456f440bc74c37af8ffafa4d1e"} Feb 16 12:47:23 crc kubenswrapper[4949]: I0216 12:47:23.383403 4949 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-zrlgd" podStartSLOduration=2.953278658 podStartE2EDuration="6.383373023s" podCreationTimestamp="2026-02-16 12:47:17 +0000 UTC" firstStartedPulling="2026-02-16 12:47:19.304155493 +0000 UTC m=+6028.933489658" lastFinishedPulling="2026-02-16 12:47:22.734249868 +0000 UTC m=+6032.363584023" observedRunningTime="2026-02-16 12:47:23.377236579 +0000 UTC m=+6033.006570754" watchObservedRunningTime="2026-02-16 12:47:23.383373023 +0000 UTC m=+6033.012707178" Feb 16 12:47:28 crc kubenswrapper[4949]: I0216 12:47:28.180546 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-zrlgd" Feb 16 12:47:28 crc kubenswrapper[4949]: I0216 12:47:28.181101 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-zrlgd" Feb 16 12:47:28 crc kubenswrapper[4949]: I0216 12:47:28.232979 4949 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-zrlgd" Feb 16 12:47:28 crc kubenswrapper[4949]: I0216 12:47:28.239029 4949 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 16 12:47:28 crc kubenswrapper[4949]: E0216 12:47:28.370953 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 16 12:47:28 crc kubenswrapper[4949]: E0216 12:47:28.371030 4949 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 16 12:47:28 crc kubenswrapper[4949]: E0216 12:47:28.371165 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n674h5dh7bh65bhcch65chc4h547h5d4h5c7h5dch5c8h74hb9h5f4hd8h79h7h59bh559h56bh9bhbch67bh68bh575h5cbh658h5bch7bhcch5d9q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8k7p7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(c69d7379-6f2b-45ae-8972-71e223a337a8): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 16 12:47:28 crc kubenswrapper[4949]: E0216 12:47:28.372427 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:47:28 crc kubenswrapper[4949]: I0216 12:47:28.469442 4949 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-zrlgd" Feb 16 12:47:28 crc kubenswrapper[4949]: I0216 12:47:28.527988 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zrlgd"] Feb 16 12:47:30 crc kubenswrapper[4949]: E0216 12:47:30.237305 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da" Feb 16 12:47:30 crc kubenswrapper[4949]: I0216 12:47:30.441139 4949 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-zrlgd" podUID="43ed3b84-a0b0-454f-9281-351a67e87275" containerName="registry-server" containerID="cri-o://5d19acf4cadd18c006b35c0bd0e149824e1567456f440bc74c37af8ffafa4d1e" gracePeriod=2 Feb 16 12:47:31 crc kubenswrapper[4949]: I0216 12:47:31.056716 4949 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zrlgd" Feb 16 12:47:31 crc kubenswrapper[4949]: I0216 12:47:31.199367 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-phxlj\" (UniqueName: \"kubernetes.io/projected/43ed3b84-a0b0-454f-9281-351a67e87275-kube-api-access-phxlj\") pod \"43ed3b84-a0b0-454f-9281-351a67e87275\" (UID: \"43ed3b84-a0b0-454f-9281-351a67e87275\") " Feb 16 12:47:31 crc kubenswrapper[4949]: I0216 12:47:31.199697 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/43ed3b84-a0b0-454f-9281-351a67e87275-utilities\") pod \"43ed3b84-a0b0-454f-9281-351a67e87275\" (UID: \"43ed3b84-a0b0-454f-9281-351a67e87275\") " Feb 16 12:47:31 crc kubenswrapper[4949]: I0216 12:47:31.199838 4949 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/43ed3b84-a0b0-454f-9281-351a67e87275-catalog-content\") pod \"43ed3b84-a0b0-454f-9281-351a67e87275\" (UID: \"43ed3b84-a0b0-454f-9281-351a67e87275\") " Feb 16 12:47:31 crc kubenswrapper[4949]: I0216 12:47:31.200568 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/43ed3b84-a0b0-454f-9281-351a67e87275-utilities" (OuterVolumeSpecName: "utilities") pod "43ed3b84-a0b0-454f-9281-351a67e87275" (UID: "43ed3b84-a0b0-454f-9281-351a67e87275"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 12:47:31 crc kubenswrapper[4949]: I0216 12:47:31.204890 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43ed3b84-a0b0-454f-9281-351a67e87275-kube-api-access-phxlj" (OuterVolumeSpecName: "kube-api-access-phxlj") pod "43ed3b84-a0b0-454f-9281-351a67e87275" (UID: "43ed3b84-a0b0-454f-9281-351a67e87275"). InnerVolumeSpecName "kube-api-access-phxlj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 16 12:47:31 crc kubenswrapper[4949]: I0216 12:47:31.257919 4949 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/43ed3b84-a0b0-454f-9281-351a67e87275-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "43ed3b84-a0b0-454f-9281-351a67e87275" (UID: "43ed3b84-a0b0-454f-9281-351a67e87275"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 16 12:47:31 crc kubenswrapper[4949]: I0216 12:47:31.302735 4949 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/43ed3b84-a0b0-454f-9281-351a67e87275-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 16 12:47:31 crc kubenswrapper[4949]: I0216 12:47:31.302776 4949 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-phxlj\" (UniqueName: \"kubernetes.io/projected/43ed3b84-a0b0-454f-9281-351a67e87275-kube-api-access-phxlj\") on node \"crc\" DevicePath \"\"" Feb 16 12:47:31 crc kubenswrapper[4949]: I0216 12:47:31.302786 4949 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/43ed3b84-a0b0-454f-9281-351a67e87275-utilities\") on node \"crc\" DevicePath \"\"" Feb 16 12:47:31 crc kubenswrapper[4949]: I0216 12:47:31.452888 4949 generic.go:334] "Generic (PLEG): container finished" podID="43ed3b84-a0b0-454f-9281-351a67e87275" containerID="5d19acf4cadd18c006b35c0bd0e149824e1567456f440bc74c37af8ffafa4d1e" exitCode=0 Feb 16 12:47:31 crc kubenswrapper[4949]: I0216 12:47:31.452937 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zrlgd" event={"ID":"43ed3b84-a0b0-454f-9281-351a67e87275","Type":"ContainerDied","Data":"5d19acf4cadd18c006b35c0bd0e149824e1567456f440bc74c37af8ffafa4d1e"} Feb 16 12:47:31 crc kubenswrapper[4949]: I0216 12:47:31.452966 4949 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-zrlgd" Feb 16 12:47:31 crc kubenswrapper[4949]: I0216 12:47:31.452974 4949 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zrlgd" event={"ID":"43ed3b84-a0b0-454f-9281-351a67e87275","Type":"ContainerDied","Data":"ed285c3af358170413e98488c98d8228fff731567df2d10d31ae4e4753b884b9"} Feb 16 12:47:31 crc kubenswrapper[4949]: I0216 12:47:31.452997 4949 scope.go:117] "RemoveContainer" containerID="5d19acf4cadd18c006b35c0bd0e149824e1567456f440bc74c37af8ffafa4d1e" Feb 16 12:47:31 crc kubenswrapper[4949]: I0216 12:47:31.471633 4949 scope.go:117] "RemoveContainer" containerID="e353e6c32d849e94fccf5098b820d377effbca9e3c3c1c449891d23708a1f9ff" Feb 16 12:47:31 crc kubenswrapper[4949]: I0216 12:47:31.502146 4949 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zrlgd"] Feb 16 12:47:31 crc kubenswrapper[4949]: I0216 12:47:31.513099 4949 scope.go:117] "RemoveContainer" containerID="0f1079c1754dec6b156561b18e39ab2a552af937b4b71d032b087815c4fcd164" Feb 16 12:47:31 crc kubenswrapper[4949]: I0216 12:47:31.528599 4949 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-zrlgd"] Feb 16 12:47:31 crc kubenswrapper[4949]: I0216 12:47:31.561349 4949 scope.go:117] "RemoveContainer" containerID="5d19acf4cadd18c006b35c0bd0e149824e1567456f440bc74c37af8ffafa4d1e" Feb 16 12:47:31 crc kubenswrapper[4949]: E0216 12:47:31.562128 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5d19acf4cadd18c006b35c0bd0e149824e1567456f440bc74c37af8ffafa4d1e\": container with ID starting with 5d19acf4cadd18c006b35c0bd0e149824e1567456f440bc74c37af8ffafa4d1e not found: ID does not exist" containerID="5d19acf4cadd18c006b35c0bd0e149824e1567456f440bc74c37af8ffafa4d1e" Feb 16 12:47:31 crc kubenswrapper[4949]: I0216 12:47:31.562169 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5d19acf4cadd18c006b35c0bd0e149824e1567456f440bc74c37af8ffafa4d1e"} err="failed to get container status \"5d19acf4cadd18c006b35c0bd0e149824e1567456f440bc74c37af8ffafa4d1e\": rpc error: code = NotFound desc = could not find container \"5d19acf4cadd18c006b35c0bd0e149824e1567456f440bc74c37af8ffafa4d1e\": container with ID starting with 5d19acf4cadd18c006b35c0bd0e149824e1567456f440bc74c37af8ffafa4d1e not found: ID does not exist" Feb 16 12:47:31 crc kubenswrapper[4949]: I0216 12:47:31.562211 4949 scope.go:117] "RemoveContainer" containerID="e353e6c32d849e94fccf5098b820d377effbca9e3c3c1c449891d23708a1f9ff" Feb 16 12:47:31 crc kubenswrapper[4949]: E0216 12:47:31.562520 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e353e6c32d849e94fccf5098b820d377effbca9e3c3c1c449891d23708a1f9ff\": container with ID starting with e353e6c32d849e94fccf5098b820d377effbca9e3c3c1c449891d23708a1f9ff not found: ID does not exist" containerID="e353e6c32d849e94fccf5098b820d377effbca9e3c3c1c449891d23708a1f9ff" Feb 16 12:47:31 crc kubenswrapper[4949]: I0216 12:47:31.562551 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e353e6c32d849e94fccf5098b820d377effbca9e3c3c1c449891d23708a1f9ff"} err="failed to get container status \"e353e6c32d849e94fccf5098b820d377effbca9e3c3c1c449891d23708a1f9ff\": rpc error: code = NotFound desc = could not find 
container \"e353e6c32d849e94fccf5098b820d377effbca9e3c3c1c449891d23708a1f9ff\": container with ID starting with e353e6c32d849e94fccf5098b820d377effbca9e3c3c1c449891d23708a1f9ff not found: ID does not exist" Feb 16 12:47:31 crc kubenswrapper[4949]: I0216 12:47:31.562566 4949 scope.go:117] "RemoveContainer" containerID="0f1079c1754dec6b156561b18e39ab2a552af937b4b71d032b087815c4fcd164" Feb 16 12:47:31 crc kubenswrapper[4949]: E0216 12:47:31.562841 4949 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0f1079c1754dec6b156561b18e39ab2a552af937b4b71d032b087815c4fcd164\": container with ID starting with 0f1079c1754dec6b156561b18e39ab2a552af937b4b71d032b087815c4fcd164 not found: ID does not exist" containerID="0f1079c1754dec6b156561b18e39ab2a552af937b4b71d032b087815c4fcd164" Feb 16 12:47:31 crc kubenswrapper[4949]: I0216 12:47:31.562871 4949 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f1079c1754dec6b156561b18e39ab2a552af937b4b71d032b087815c4fcd164"} err="failed to get container status \"0f1079c1754dec6b156561b18e39ab2a552af937b4b71d032b087815c4fcd164\": rpc error: code = NotFound desc = could not find container \"0f1079c1754dec6b156561b18e39ab2a552af937b4b71d032b087815c4fcd164\": container with ID starting with 0f1079c1754dec6b156561b18e39ab2a552af937b4b71d032b087815c4fcd164 not found: ID does not exist" Feb 16 12:47:33 crc kubenswrapper[4949]: I0216 12:47:33.251363 4949 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43ed3b84-a0b0-454f-9281-351a67e87275" path="/var/lib/kubelet/pods/43ed3b84-a0b0-454f-9281-351a67e87275/volumes" Feb 16 12:47:35 crc kubenswrapper[4949]: I0216 12:47:35.236190 4949 scope.go:117] "RemoveContainer" containerID="a788d46c22bcba34c69e62fd8bc394fd4dba565c14d59b49c30694ff0d3923f4" Feb 16 12:47:35 crc kubenswrapper[4949]: E0216 12:47:35.236790 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-26lss_openshift-machine-config-operator(39ca5ab7-457c-4404-a3eb-f6acce74843b)\"" pod="openshift-machine-config-operator/machine-config-daemon-26lss" podUID="39ca5ab7-457c-4404-a3eb-f6acce74843b" Feb 16 12:47:43 crc kubenswrapper[4949]: E0216 12:47:43.237720 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="c69d7379-6f2b-45ae-8972-71e223a337a8" Feb 16 12:47:45 crc kubenswrapper[4949]: E0216 12:47:45.382786 4949 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 16 12:47:45 crc kubenswrapper[4949]: E0216 12:47:45.383883 4949 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 16 12:47:45 crc kubenswrapper[4949]: E0216 12:47:45.384267 4949 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ksbml,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-5lgds_openstack(a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" logger="UnhandledError" Feb 16 12:47:45 crc kubenswrapper[4949]: E0216 12:47:45.385359 4949 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/heat-db-sync-5lgds" podUID="a24cd521-cfbb-48ec-b8f5-4c8b26c0b6da"